Merge branch 'linus' into sched/core
Resolve cherry-picking conflicts:
Conflicts:
mm/huge_memory.c
mm/memory.c
mm/mprotect.c
See this upstream merge commit for more details:
52469b4fcd
Merge branch 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit fb10d5b7ef
@@ -37,8 +37,8 @@ Description:
that the USB device has been connected to the machine. This
file is read-only.
Users:
-PowerTOP <power@bughost.org>
-http://www.lesswatts.org/projects/powertop/
+PowerTOP <powertop@lists.01.org>
+https://01.org/powertop/

What: /sys/bus/usb/device/.../power/active_duration
Date: January 2008
@@ -57,8 +57,8 @@ Description:
will give an integer percentage. Note that this does not
account for counter wrap.
Users:
-PowerTOP <power@bughost.org>
-http://www.lesswatts.org/projects/powertop/
+PowerTOP <powertop@lists.01.org>
+https://01.org/powertop/

What: /sys/bus/usb/devices/<busnum>-<port[.port]>...:<config num>-<interface num>/supports_autosuspend
Date: January 2008

@@ -1,6 +1,6 @@
What: /sys/devices/.../power/
Date: January 2009
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../power directory contains attributes
allowing the user space to check and modify some power
@@ -8,7 +8,7 @@ Description:

What: /sys/devices/.../power/wakeup
Date: January 2009
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../power/wakeup attribute allows the user
space to check if the device is enabled to wake up the system
@@ -34,7 +34,7 @@ Description:

What: /sys/devices/.../power/control
Date: January 2009
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../power/control attribute allows the user
space to control the run-time power management of the device.
@@ -53,7 +53,7 @@ Description:

What: /sys/devices/.../power/async
Date: January 2009
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../async attribute allows the user space to
enable or diasble the device's suspend and resume callbacks to
@@ -79,7 +79,7 @@ Description:

What: /sys/devices/.../power/wakeup_count
Date: September 2010
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../wakeup_count attribute contains the number
of signaled wakeup events associated with the device. This
@@ -88,7 +88,7 @@ Description:

What: /sys/devices/.../power/wakeup_active_count
Date: September 2010
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../wakeup_active_count attribute contains the
number of times the processing of wakeup events associated with
@@ -98,7 +98,7 @@ Description:

What: /sys/devices/.../power/wakeup_abort_count
Date: February 2012
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../wakeup_abort_count attribute contains the
number of times the processing of a wakeup event associated with
@@ -109,7 +109,7 @@ Description:

What: /sys/devices/.../power/wakeup_expire_count
Date: February 2012
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../wakeup_expire_count attribute contains the
number of times a wakeup event associated with the device has
@@ -119,7 +119,7 @@ Description:

What: /sys/devices/.../power/wakeup_active
Date: September 2010
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../wakeup_active attribute contains either 1,
or 0, depending on whether or not a wakeup event associated with
@@ -129,7 +129,7 @@ Description:

What: /sys/devices/.../power/wakeup_total_time_ms
Date: September 2010
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../wakeup_total_time_ms attribute contains
the total time of processing wakeup events associated with the
@@ -139,7 +139,7 @@ Description:

What: /sys/devices/.../power/wakeup_max_time_ms
Date: September 2010
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../wakeup_max_time_ms attribute contains
the maximum time of processing a single wakeup event associated
@@ -149,7 +149,7 @@ Description:

What: /sys/devices/.../power/wakeup_last_time_ms
Date: September 2010
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../wakeup_last_time_ms attribute contains
the value of the monotonic clock corresponding to the time of
@@ -160,7 +160,7 @@ Description:

What: /sys/devices/.../power/wakeup_prevent_sleep_time_ms
Date: February 2012
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../wakeup_prevent_sleep_time_ms attribute
contains the total time the device has been preventing
@@ -189,7 +189,7 @@ Description:

What: /sys/devices/.../power/pm_qos_latency_us
Date: March 2012
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../power/pm_qos_resume_latency_us attribute
contains the PM QoS resume latency limit for the given device,
@@ -207,7 +207,7 @@ Description:

What: /sys/devices/.../power/pm_qos_no_power_off
Date: September 2012
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../power/pm_qos_no_power_off attribute
is used for manipulating the PM QoS "no power off" flag. If
@@ -222,7 +222,7 @@ Description:

What: /sys/devices/.../power/pm_qos_remote_wakeup
Date: September 2012
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../power/pm_qos_remote_wakeup attribute
is used for manipulating the PM QoS "remote wakeup required"

@@ -1,6 +1,6 @@
What: /sys/power/
Date: August 2006
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power directory will contain files that will
provide a unified interface to the power management
@@ -8,7 +8,7 @@ Description:

What: /sys/power/state
Date: August 2006
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power/state file controls the system power state.
Reading from this file returns what states are supported,
@@ -22,7 +22,7 @@ Description:

What: /sys/power/disk
Date: September 2006
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power/disk file controls the operating mode of the
suspend-to-disk mechanism. Reading from this file returns
@@ -67,7 +67,7 @@ Description:

What: /sys/power/image_size
Date: August 2006
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power/image_size file controls the size of the image
created by the suspend-to-disk mechanism. It can be written a
@@ -84,7 +84,7 @@ Description:

What: /sys/power/pm_trace
Date: August 2006
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power/pm_trace file controls the code which saves the
last PM event point in the RTC across reboots, so that you can
@@ -133,7 +133,7 @@ Description:

What: /sys/power/pm_async
Date: January 2009
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power/pm_async file controls the switch allowing the
user space to enable or disable asynchronous suspend and resume
@@ -146,7 +146,7 @@ Description:

What: /sys/power/wakeup_count
Date: July 2010
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power/wakeup_count file allows user space to put the
system into a sleep state while taking into account the
@@ -161,7 +161,7 @@ Description:

What: /sys/power/reserved_size
Date: May 2011
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power/reserved_size file allows user space to control
the amount of memory reserved for allocations made by device
@@ -175,7 +175,7 @@ Description:

What: /sys/power/autosleep
Date: April 2012
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power/autosleep file can be written one of the strings
returned by reads from /sys/power/state. If that happens, a
@@ -192,7 +192,7 @@ Description:

What: /sys/power/wake_lock
Date: February 2012
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power/wake_lock file allows user space to create
wakeup source objects and activate them on demand (if one of
@@ -219,7 +219,7 @@ Description:

What: /sys/power/wake_unlock
Date: February 2012
-Contact: Rafael J. Wysocki <rjw@sisk.pl>
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power/wake_unlock file allows user space to deactivate
wakeup sources created with the help of /sys/power/wake_lock.

@@ -4,4 +4,4 @@ CONFIG_ACPI_CUSTOM_DSDT builds the image into the kernel.

When to use this method is described in detail on the
Linux/ACPI home page:
-http://www.lesswatts.org/projects/acpi/overridingDSDT.php
+https://01.org/linux-acpi/documentation/overriding-dsdt

@@ -71,7 +71,7 @@ static int netlink_send(int s, struct cn_msg *msg)
nlh->nlmsg_seq = seq++;
nlh->nlmsg_pid = getpid();
nlh->nlmsg_type = NLMSG_DONE;
-nlh->nlmsg_len = NLMSG_LENGTH(size - sizeof(*nlh));
+nlh->nlmsg_len = size;
nlh->nlmsg_flags = 0;

m = NLMSG_DATA(nlh);

@ -1,168 +0,0 @@
|
|||
*** Memory binding ***
|
||||
|
||||
The /memory node provides basic information about the address and size
|
||||
of the physical memory. This node is usually filled or updated by the
|
||||
bootloader, depending on the actual memory configuration of the given
|
||||
hardware.
|
||||
|
||||
The memory layout is described by the following node:
|
||||
|
||||
/ {
|
||||
#address-cells = <(n)>;
|
||||
#size-cells = <(m)>;
|
||||
memory {
|
||||
device_type = "memory";
|
||||
reg = <(baseaddr1) (size1)
|
||||
(baseaddr2) (size2)
|
||||
...
|
||||
(baseaddrN) (sizeN)>;
|
||||
};
|
||||
...
|
||||
};
|
||||
|
||||
A memory node follows the typical device tree rules for "reg" property:
|
||||
n: number of cells used to store base address value
|
||||
m: number of cells used to store size value
|
||||
baseaddrX: defines a base address of the defined memory bank
|
||||
sizeX: the size of the defined memory bank
|
||||
|
||||
|
||||
More than one memory bank can be defined.
|
||||
|
||||
|
||||
*** Reserved memory regions ***
|
||||
|
||||
In /memory/reserved-memory node one can create child nodes describing
|
||||
particular reserved (excluded from normal use) memory regions. Such
|
||||
memory regions are usually designed for the special usage by various
|
||||
device drivers. A good example are contiguous memory allocations or
|
||||
memory sharing with other operating system on the same hardware board.
|
||||
Those special memory regions might depend on the board configuration and
|
||||
devices used on the target system.
|
||||
|
||||
Parameters for each memory region can be encoded into the device tree
|
||||
with the following convention:
|
||||
|
||||
[(label):] (name) {
|
||||
compatible = "linux,contiguous-memory-region", "reserved-memory-region";
|
||||
reg = <(address) (size)>;
|
||||
(linux,default-contiguous-region);
|
||||
};
|
||||
|
||||
compatible: one or more of:
|
||||
- "linux,contiguous-memory-region" - enables binding of this
|
||||
region to Contiguous Memory Allocator (special region for
|
||||
contiguous memory allocations, shared with movable system
|
||||
memory, Linux kernel-specific).
|
||||
- "reserved-memory-region" - compatibility is defined, given
|
||||
region is assigned for exclusive usage for by the respective
|
||||
devices.
|
||||
|
||||
reg: standard property defining the base address and size of
|
||||
the memory region
|
||||
|
||||
linux,default-contiguous-region: property indicating that the region
|
||||
is the default region for all contiguous memory
|
||||
allocations, Linux specific (optional)
|
||||
|
||||
It is optional to specify the base address, so if one wants to use
|
||||
autoconfiguration of the base address, '0' can be specified as a base
|
||||
address in the 'reg' property.
|
||||
|
||||
The /memory/reserved-memory node must contain the same #address-cells
|
||||
and #size-cells value as the root node.
|
||||
|
||||
|
||||
*** Device node's properties ***
|
||||
|
||||
Once regions in the /memory/reserved-memory node have been defined, they
|
||||
may be referenced by other device nodes. Bindings that wish to reference
|
||||
memory regions should explicitly document their use of the following
|
||||
property:
|
||||
|
||||
memory-region = <&phandle_to_defined_region>;
|
||||
|
||||
This property indicates that the device driver should use the memory
|
||||
region pointed by the given phandle.
|
||||
|
||||
|
||||
*** Example ***
|
||||
|
||||
This example defines a memory consisting of 4 memory banks. 3 contiguous
|
||||
regions are defined for Linux kernel, one default of all device drivers
|
||||
(named contig_mem, placed at 0x72000000, 64MiB), one dedicated to the
|
||||
framebuffer device (labelled display_mem, placed at 0x78000000, 8MiB)
|
||||
and one for multimedia processing (labelled multimedia_mem, placed at
|
||||
0x77000000, 64MiB). 'display_mem' region is then assigned to fb@12300000
|
||||
device for DMA memory allocations (Linux kernel drivers will use CMA is
|
||||
available or dma-exclusive usage otherwise). 'multimedia_mem' is
|
||||
assigned to scaler@12500000 and codec@12600000 devices for contiguous
|
||||
memory allocations when CMA driver is enabled.
|
||||
|
||||
The reason for creating a separate region for framebuffer device is to
|
||||
match the framebuffer base address to the one configured by bootloader,
|
||||
so once Linux kernel drivers starts no glitches on the displayed boot
|
||||
logo appears. Scaller and codec drivers should share the memory
|
||||
allocations.
|
||||
|
||||
/ {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
|
||||
/* ... */
|
||||
|
||||
memory {
|
||||
reg = <0x40000000 0x10000000
|
||||
0x50000000 0x10000000
|
||||
0x60000000 0x10000000
|
||||
0x70000000 0x10000000>;
|
||||
|
||||
reserved-memory {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
|
||||
/*
|
||||
* global autoconfigured region for contiguous allocations
|
||||
* (used only with Contiguous Memory Allocator)
|
||||
*/
|
||||
contig_region@0 {
|
||||
compatible = "linux,contiguous-memory-region";
|
||||
reg = <0x0 0x4000000>;
|
||||
linux,default-contiguous-region;
|
||||
};
|
||||
|
||||
/*
|
||||
* special region for framebuffer
|
||||
*/
|
||||
display_region: region@78000000 {
|
||||
compatible = "linux,contiguous-memory-region", "reserved-memory-region";
|
||||
reg = <0x78000000 0x800000>;
|
||||
};
|
||||
|
||||
/*
|
||||
* special region for multimedia processing devices
|
||||
*/
|
||||
multimedia_region: region@77000000 {
|
||||
compatible = "linux,contiguous-memory-region";
|
||||
reg = <0x77000000 0x4000000>;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
/* ... */
|
||||
|
||||
fb0: fb@12300000 {
|
||||
status = "okay";
|
||||
memory-region = <&display_region>;
|
||||
};
|
||||
|
||||
scaler: scaler@12500000 {
|
||||
status = "okay";
|
||||
memory-region = <&multimedia_region>;
|
||||
};
|
||||
|
||||
codec: codec@12600000 {
|
||||
status = "okay";
|
||||
memory-region = <&multimedia_region>;
|
||||
};
|
||||
};
|
|
@@ -28,6 +28,7 @@ ALC269/270/275/276/28x/29x
alc269-dmic Enable ALC269(VA) digital mic workaround
alc271-dmic Enable ALC271X digital mic workaround
inv-dmic Inverted internal mic workaround
headset-mic Indicates a combined headset (headphone+mic) jack
lenovo-dock Enables docking station I/O for some Lenovos
dell-headset-multi Headset jack, which can also be used as mic-in
dell-headset-dock Headset jack (without mic-in), and also dock I/O

MAINTAINERS
@ -237,11 +237,11 @@ F: drivers/platform/x86/acer-wmi.c
|
|||
|
||||
ACPI
|
||||
M: Len Brown <lenb@kernel.org>
|
||||
M: Rafael J. Wysocki <rjw@sisk.pl>
|
||||
M: Rafael J. Wysocki <rjw@rjwysocki.net>
|
||||
L: linux-acpi@vger.kernel.org
|
||||
W: http://www.lesswatts.org/projects/acpi/
|
||||
Q: http://patchwork.kernel.org/project/linux-acpi/list/
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux
|
||||
W: https://01.org/linux-acpi
|
||||
Q: https://patchwork.kernel.org/project/linux-acpi/list/
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
|
||||
S: Supported
|
||||
F: drivers/acpi/
|
||||
F: drivers/pnp/pnpacpi/
|
||||
|
@ -256,21 +256,21 @@ F: drivers/pci/*/*/*acpi*
|
|||
ACPI FAN DRIVER
|
||||
M: Zhang Rui <rui.zhang@intel.com>
|
||||
L: linux-acpi@vger.kernel.org
|
||||
W: http://www.lesswatts.org/projects/acpi/
|
||||
W: https://01.org/linux-acpi
|
||||
S: Supported
|
||||
F: drivers/acpi/fan.c
|
||||
|
||||
ACPI THERMAL DRIVER
|
||||
M: Zhang Rui <rui.zhang@intel.com>
|
||||
L: linux-acpi@vger.kernel.org
|
||||
W: http://www.lesswatts.org/projects/acpi/
|
||||
W: https://01.org/linux-acpi
|
||||
S: Supported
|
||||
F: drivers/acpi/*thermal*
|
||||
|
||||
ACPI VIDEO DRIVER
|
||||
M: Zhang Rui <rui.zhang@intel.com>
|
||||
L: linux-acpi@vger.kernel.org
|
||||
W: http://www.lesswatts.org/projects/acpi/
|
||||
W: https://01.org/linux-acpi
|
||||
S: Supported
|
||||
F: drivers/acpi/video.c
|
||||
|
||||
|
@ -1009,6 +1009,7 @@ ARM/Marvell Armada 370 and Armada XP SOC support
|
|||
M: Jason Cooper <jason@lakedaemon.net>
|
||||
M: Andrew Lunn <andrew@lunn.ch>
|
||||
M: Gregory Clement <gregory.clement@free-electrons.com>
|
||||
M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
|
||||
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
|
||||
S: Maintained
|
||||
F: arch/arm/mach-mvebu/
|
||||
|
@ -1016,6 +1017,7 @@ F: arch/arm/mach-mvebu/
|
|||
ARM/Marvell Dove/Kirkwood/MV78xx0/Orion SOC support
|
||||
M: Jason Cooper <jason@lakedaemon.net>
|
||||
M: Andrew Lunn <andrew@lunn.ch>
|
||||
M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
|
||||
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
|
||||
S: Maintained
|
||||
F: arch/arm/mach-dove/
|
||||
|
@ -1148,6 +1150,13 @@ F: drivers/net/ethernet/i825xx/ether1*
|
|||
F: drivers/net/ethernet/seeq/ether3*
|
||||
F: drivers/scsi/arm/
|
||||
|
||||
ARM/Rockchip SoC support
|
||||
M: Heiko Stuebner <heiko@sntech.de>
|
||||
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
|
||||
S: Maintained
|
||||
F: arch/arm/mach-rockchip/
|
||||
F: drivers/*/*rockchip*
|
||||
|
||||
ARM/SHARK MACHINE SUPPORT
|
||||
M: Alexander Schulz <alex@shark-linux.de>
|
||||
W: http://www.shark-linux.de/shark.html
|
||||
|
@ -1791,6 +1800,7 @@ F: include/net/bluetooth/
|
|||
|
||||
BONDING DRIVER
|
||||
M: Jay Vosburgh <fubar@us.ibm.com>
|
||||
M: Veaceslav Falico <vfalico@redhat.com>
|
||||
M: Andy Gospodarek <andy@greyhouse.net>
|
||||
L: netdev@vger.kernel.org
|
||||
W: http://sourceforge.net/projects/bonding/
|
||||
|
@ -2300,7 +2310,7 @@ S: Maintained
|
|||
F: drivers/net/ethernet/ti/cpmac.c
|
||||
|
||||
CPU FREQUENCY DRIVERS
|
||||
M: Rafael J. Wysocki <rjw@sisk.pl>
|
||||
M: Rafael J. Wysocki <rjw@rjwysocki.net>
|
||||
M: Viresh Kumar <viresh.kumar@linaro.org>
|
||||
L: cpufreq@vger.kernel.org
|
||||
L: linux-pm@vger.kernel.org
|
||||
|
@ -2331,7 +2341,7 @@ S: Maintained
|
|||
F: drivers/cpuidle/cpuidle-big_little.c
|
||||
|
||||
CPUIDLE DRIVERS
|
||||
M: Rafael J. Wysocki <rjw@sisk.pl>
|
||||
M: Rafael J. Wysocki <rjw@rjwysocki.net>
|
||||
M: Daniel Lezcano <daniel.lezcano@linaro.org>
|
||||
L: linux-pm@vger.kernel.org
|
||||
S: Maintained
|
||||
|
@ -2718,6 +2728,8 @@ T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
|
|||
DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
|
||||
M: Vinod Koul <vinod.koul@intel.com>
|
||||
M: Dan Williams <dan.j.williams@intel.com>
|
||||
L: dmaengine@vger.kernel.org
|
||||
Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
|
||||
S: Supported
|
||||
F: drivers/dma/
|
||||
F: include/linux/dma*
|
||||
|
@ -2821,7 +2833,7 @@ M: Terje Bergström <tbergstrom@nvidia.com>
|
|||
L: dri-devel@lists.freedesktop.org
|
||||
L: linux-tegra@vger.kernel.org
|
||||
T: git git://anongit.freedesktop.org/tegra/linux.git
|
||||
S: Maintained
|
||||
S: Supported
|
||||
F: drivers/gpu/host1x/
|
||||
F: include/uapi/drm/tegra_drm.h
|
||||
F: Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
|
||||
|
@ -3553,7 +3565,7 @@ F: fs/freevxfs/
|
|||
|
||||
FREEZER
|
||||
M: Pavel Machek <pavel@ucw.cz>
|
||||
M: "Rafael J. Wysocki" <rjw@sisk.pl>
|
||||
M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
|
||||
L: linux-pm@vger.kernel.org
|
||||
S: Supported
|
||||
F: Documentation/power/freezing-of-tasks.txt
|
||||
|
@ -3624,6 +3636,12 @@ L: linux-scsi@vger.kernel.org
|
|||
S: Odd Fixes (e.g., new signatures)
|
||||
F: drivers/scsi/fdomain.*
|
||||
|
||||
GCOV BASED KERNEL PROFILING
|
||||
M: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
|
||||
S: Maintained
|
||||
F: kernel/gcov/
|
||||
F: Documentation/gcov.txt
|
||||
|
||||
GDT SCSI DISK ARRAY CONTROLLER DRIVER
|
||||
M: Achim Leubner <achim_leubner@adaptec.com>
|
||||
L: linux-scsi@vger.kernel.org
|
||||
|
@ -3889,7 +3907,7 @@ F: drivers/video/hgafb.c
|
|||
|
||||
HIBERNATION (aka Software Suspend, aka swsusp)
|
||||
M: Pavel Machek <pavel@ucw.cz>
|
||||
M: "Rafael J. Wysocki" <rjw@sisk.pl>
|
||||
M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
|
||||
L: linux-pm@vger.kernel.org
|
||||
S: Supported
|
||||
F: arch/x86/power/
|
||||
|
@ -4339,7 +4357,7 @@ F: drivers/video/i810/
|
|||
INTEL MENLOW THERMAL DRIVER
|
||||
M: Sujith Thomas <sujith.thomas@intel.com>
|
||||
L: platform-driver-x86@vger.kernel.org
|
||||
W: http://www.lesswatts.org/projects/acpi/
|
||||
W: https://01.org/linux-acpi
|
||||
S: Supported
|
||||
F: drivers/platform/x86/intel_menlow.c
|
||||
|
||||
|
@ -4351,7 +4369,10 @@ F: arch/x86/kernel/microcode_intel.c
|
|||
|
||||
INTEL I/OAT DMA DRIVER
|
||||
M: Dan Williams <dan.j.williams@intel.com>
|
||||
S: Maintained
|
||||
M: Dave Jiang <dave.jiang@intel.com>
|
||||
L: dmaengine@vger.kernel.org
|
||||
Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
|
||||
S: Supported
|
||||
F: drivers/dma/ioat*
|
||||
|
||||
INTEL IOMMU (VT-d)
|
||||
|
@ -7818,6 +7839,13 @@ F: Documentation/sound/alsa/soc/
|
|||
F: sound/soc/
|
||||
F: include/sound/soc*
|
||||
|
||||
SOUND - DMAENGINE HELPERS
|
||||
M: Lars-Peter Clausen <lars@metafoo.de>
|
||||
S: Supported
|
||||
F: include/sound/dmaengine_pcm.h
|
||||
F: sound/core/pcm_dmaengine.c
|
||||
F: sound/soc/soc-generic-dmaengine-pcm.c
|
||||
|
||||
SPARC + UltraSPARC (sparc/sparc64)
|
||||
M: "David S. Miller" <davem@davemloft.net>
|
||||
L: sparclinux@vger.kernel.org
|
||||
|
@ -8097,7 +8125,7 @@ F: drivers/sh/
|
|||
SUSPEND TO RAM
|
||||
M: Len Brown <len.brown@intel.com>
|
||||
M: Pavel Machek <pavel@ucw.cz>
|
||||
M: "Rafael J. Wysocki" <rjw@sisk.pl>
|
||||
M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
|
||||
L: linux-pm@vger.kernel.org
|
||||
S: Supported
|
||||
F: Documentation/power/
|
||||
|
@ -8290,14 +8318,72 @@ L: linux-media@vger.kernel.org
|
|||
S: Maintained
|
||||
F: drivers/media/rc/ttusbir.c
|
||||
|
||||
TEGRA SUPPORT
|
||||
TEGRA ARCHITECTURE SUPPORT
|
||||
M: Stephen Warren <swarren@wwwdotorg.org>
|
||||
M: Thierry Reding <thierry.reding@gmail.com>
|
||||
L: linux-tegra@vger.kernel.org
|
||||
Q: http://patchwork.ozlabs.org/project/linux-tegra/list/
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-tegra.git
|
||||
S: Supported
|
||||
N: [^a-z]tegra
|
||||
|
||||
TEGRA ASOC DRIVER
|
||||
M: Stephen Warren <swarren@wwwdotorg.org>
|
||||
S: Supported
|
||||
F: sound/soc/tegra/
|
||||
|
||||
TEGRA CLOCK DRIVER
|
||||
M: Peter De Schrijver <pdeschrijver@nvidia.com>
|
||||
M: Prashant Gaikwad <pgaikwad@nvidia.com>
|
||||
S: Supported
|
||||
F: drivers/clk/tegra/
|
||||
|
||||
TEGRA DMA DRIVER
|
||||
M: Laxman Dewangan <ldewangan@nvidia.com>
|
||||
S: Supported
|
||||
F: drivers/dma/tegra20-apb-dma.c
|
||||
|
||||
TEGRA GPIO DRIVER
|
||||
M: Stephen Warren <swarren@wwwdotorg.org>
|
||||
S: Supported
|
||||
F: drivers/gpio/gpio-tegra.c
|
||||
|
||||
TEGRA I2C DRIVER
|
||||
M: Laxman Dewangan <ldewangan@nvidia.com>
|
||||
S: Supported
|
||||
F: drivers/i2c/busses/i2c-tegra.c
|
||||
|
||||
TEGRA IOMMU DRIVERS
|
||||
M: Hiroshi Doyu <hdoyu@nvidia.com>
|
||||
S: Supported
|
||||
F: drivers/iommu/tegra*
|
||||
|
||||
TEGRA KBC DRIVER
|
||||
M: Rakesh Iyer <riyer@nvidia.com>
|
||||
M: Laxman Dewangan <ldewangan@nvidia.com>
|
||||
S: Supported
|
||||
F: drivers/input/keyboard/tegra-kbc.c
|
||||
|
||||
TEGRA PINCTRL DRIVER
|
||||
M: Stephen Warren <swarren@wwwdotorg.org>
|
||||
S: Supported
|
||||
F: drivers/pinctrl/pinctrl-tegra*
|
||||
|
||||
TEGRA PWM DRIVER
|
||||
M: Thierry Reding <thierry.reding@gmail.com>
|
||||
S: Supported
|
||||
F: drivers/pwm/pwm-tegra.c
|
||||
|
||||
TEGRA SERIAL DRIVER
|
||||
M: Laxman Dewangan <ldewangan@nvidia.com>
|
||||
S: Supported
|
||||
F: drivers/tty/serial/serial-tegra.c
|
||||
|
||||
TEGRA SPI DRIVER
|
||||
M: Laxman Dewangan <ldewangan@nvidia.com>
|
||||
S: Supported
|
||||
F: drivers/spi/spi-tegra*
|
||||
|
||||
TEHUTI ETHERNET DRIVER
|
||||
M: Andy Gospodarek <andy@greyhouse.net>
|
||||
L: netdev@vger.kernel.org
|
||||
|
|
Makefile
|
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 12
SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc7
NAME = One Giant Leap for Frogkind

# *DOCUMENTATION*

@ -102,7 +102,7 @@ static int genregs_set(struct task_struct *target,
|
|||
REG_IGNORE_ONE(pad2);
|
||||
REG_IN_CHUNK(callee, efa, cregs); /* callee_regs[r25..r13] */
|
||||
REG_IGNORE_ONE(efa); /* efa update invalid */
|
||||
REG_IN_ONE(stop_pc, &ptregs->ret); /* stop_pc: PC update */
|
||||
REG_IGNORE_ONE(stop_pc); /* PC updated via @ret */
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -96,6 +96,11 @@
|
|||
<1 14 0xf08>,
|
||||
<1 11 0xf08>,
|
||||
<1 10 0xf08>;
|
||||
/* Unfortunately we need this since some versions of U-Boot
|
||||
* on Exynos don't set the CNTFRQ register, so we need the
|
||||
* value from DT.
|
||||
*/
|
||||
clock-frequency = <24000000>;
|
||||
};
|
||||
|
||||
mct@101C0000 {
|
||||
|
|
|
@ -9,11 +9,6 @@
|
|||
model = "ARM Integrator/CP";
|
||||
compatible = "arm,integrator-cp";
|
||||
|
||||
aliases {
|
||||
arm,timer-primary = &timer2;
|
||||
arm,timer-secondary = &timer1;
|
||||
};
|
||||
|
||||
chosen {
|
||||
bootargs = "root=/dev/ram0 console=ttyAMA0,38400n8 earlyprintk";
|
||||
};
|
||||
|
@ -24,14 +19,18 @@
|
|||
};
|
||||
|
||||
timer0: timer@13000000 {
|
||||
/* TIMER0 runs @ 25MHz */
|
||||
compatible = "arm,integrator-cp-timer";
|
||||
status = "disabled";
|
||||
};
|
||||
|
||||
timer1: timer@13000100 {
|
||||
/* TIMER1 runs @ 1MHz */
|
||||
compatible = "arm,integrator-cp-timer";
|
||||
};
|
||||
|
||||
timer2: timer@13000200 {
|
||||
/* TIMER2 runs @ 1MHz */
|
||||
compatible = "arm,integrator-cp-timer";
|
||||
};
|
||||
|
||||
|
|
|
@ -11,7 +11,7 @@
|
|||
|
||||
/ {
|
||||
model = "TI OMAP3 BeagleBoard xM";
|
||||
compatible = "ti,omap3-beagle-xm", "ti,omap3-beagle", "ti,omap3";
|
||||
compatible = "ti,omap3-beagle-xm", "ti,omap36xx", "ti,omap3";
|
||||
|
||||
cpus {
|
||||
cpu@0 {
|
||||
|
|
|
@ -108,7 +108,7 @@
|
|||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
pinctrl-single,register-width = <16>;
|
||||
pinctrl-single,function-mask = <0x7f1f>;
|
||||
pinctrl-single,function-mask = <0xff1f>;
|
||||
};
|
||||
|
||||
omap3_pmx_wkup: pinmux@0x48002a00 {
|
||||
|
@ -117,7 +117,7 @@
|
|||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
pinctrl-single,register-width = <16>;
|
||||
pinctrl-single,function-mask = <0x7f1f>;
|
||||
pinctrl-single,function-mask = <0xff1f>;
|
||||
};
|
||||
|
||||
gpio1: gpio@48310000 {
|
||||
|
|
|
@ -51,7 +51,8 @@ void mcpm_cpu_power_down(void)
|
|||
{
|
||||
phys_reset_t phys_reset;
|
||||
|
||||
BUG_ON(!platform_ops);
|
||||
if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down))
|
||||
return;
|
||||
BUG_ON(!irqs_disabled());
|
||||
|
||||
/*
|
||||
|
@ -93,7 +94,8 @@ void mcpm_cpu_suspend(u64 expected_residency)
|
|||
{
|
||||
phys_reset_t phys_reset;
|
||||
|
||||
BUG_ON(!platform_ops);
|
||||
if (WARN_ON_ONCE(!platform_ops || !platform_ops->suspend))
|
||||
return;
|
||||
BUG_ON(!irqs_disabled());
|
||||
|
||||
/* Very similar to mcpm_cpu_power_down() */
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
#include <linux/module.h>
|
||||
#include <linux/string.h>
|
||||
#include <asm/mach/sharpsl_param.h>
|
||||
#include <asm/memory.h>
|
||||
|
||||
/*
|
||||
* Certain hardware parameters determined at the time of device manufacture,
|
||||
|
@ -25,8 +26,10 @@
|
|||
*/
|
||||
#ifdef CONFIG_ARCH_SA1100
|
||||
#define PARAM_BASE 0xe8ffc000
|
||||
#define param_start(x) (void *)(x)
|
||||
#else
|
||||
#define PARAM_BASE 0xa0000a00
|
||||
#define param_start(x) __va(x)
|
||||
#endif
|
||||
#define MAGIC_CHG(a,b,c,d) ( ( d << 24 ) | ( c << 16 ) | ( b << 8 ) | a )
|
||||
|
||||
|
@ -41,7 +44,7 @@ EXPORT_SYMBOL(sharpsl_param);
|
|||
|
||||
void sharpsl_save_param(void)
|
||||
{
|
||||
memcpy(&sharpsl_param, (void *)PARAM_BASE, sizeof(struct sharpsl_param_info));
|
||||
memcpy(&sharpsl_param, param_start(PARAM_BASE), sizeof(struct sharpsl_param_info));
|
||||
|
||||
if (sharpsl_param.comadj_keyword != COMADJ_MAGIC)
|
||||
sharpsl_param.comadj=-1;
|
||||
|
|
|
@ -31,6 +31,5 @@ generic-y += termbits.h
|
|||
generic-y += termios.h
|
||||
generic-y += timex.h
|
||||
generic-y += trace_clock.h
|
||||
generic-y += types.h
|
||||
generic-y += unaligned.h
|
||||
generic-y += preempt.h
|
||||
|
|
|
@ -76,8 +76,11 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
|
|||
*
|
||||
* This must be called with interrupts disabled.
|
||||
*
|
||||
* This does not return. Re-entry in the kernel is expected via
|
||||
* mcpm_entry_point.
|
||||
* On success this does not return. Re-entry in the kernel is expected
|
||||
* via mcpm_entry_point.
|
||||
*
|
||||
* This will return if mcpm_platform_register() has not been called
|
||||
* previously in which case the caller should take appropriate action.
|
||||
*/
|
||||
void mcpm_cpu_power_down(void);
|
||||
|
||||
|
@ -98,8 +101,11 @@ void mcpm_cpu_power_down(void);
|
|||
*
|
||||
* This must be called with interrupts disabled.
|
||||
*
|
||||
* This does not return. Re-entry in the kernel is expected via
|
||||
* mcpm_entry_point.
|
||||
* On success this does not return. Re-entry in the kernel is expected
|
||||
* via mcpm_entry_point.
|
||||
*
|
||||
* This will return if mcpm_platform_register() has not been called
|
||||
* previously in which case the caller should take appropriate action.
|
||||
*/
|
||||
void mcpm_cpu_suspend(u64 expected_residency);
|
||||
|
||||
|
|
|
@ -57,6 +57,9 @@ static inline void syscall_get_arguments(struct task_struct *task,
|
|||
unsigned int i, unsigned int n,
|
||||
unsigned long *args)
|
||||
{
|
||||
if (n == 0)
|
||||
return;
|
||||
|
||||
if (i + n > SYSCALL_MAX_ARGS) {
|
||||
unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
|
||||
unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
|
||||
|
@ -81,6 +84,9 @@ static inline void syscall_set_arguments(struct task_struct *task,
|
|||
unsigned int i, unsigned int n,
|
||||
const unsigned long *args)
|
||||
{
|
||||
if (n == 0)
|
||||
return;
|
||||
|
||||
if (i + n > SYSCALL_MAX_ARGS) {
|
||||
pr_warning("%s called with max args %d, handling only %d\n",
|
||||
__func__, i + n, SYSCALL_MAX_ARGS);
|
||||
|
|
|
@ -487,7 +487,26 @@ __fixup_smp:
|
|||
mrc p15, 0, r0, c0, c0, 5 @ read MPIDR
|
||||
and r0, r0, #0xc0000000 @ multiprocessing extensions and
|
||||
teq r0, #0x80000000 @ not part of a uniprocessor system?
|
||||
moveq pc, lr @ yes, assume SMP
|
||||
bne __fixup_smp_on_up @ no, assume UP
|
||||
|
||||
@ Core indicates it is SMP. Check for Aegis SOC where a single
|
||||
@ Cortex-A9 CPU is present but SMP operations fault.
|
||||
mov r4, #0x41000000
|
||||
orr r4, r4, #0x0000c000
|
||||
orr r4, r4, #0x00000090
|
||||
teq r3, r4 @ Check for ARM Cortex-A9
|
||||
movne pc, lr @ Not ARM Cortex-A9,
|
||||
|
||||
@ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the
|
||||
@ below address check will need to be #ifdef'd or equivalent
|
||||
@ for the Aegis platform.
|
||||
mrc p15, 4, r0, c15, c0 @ get SCU base address
|
||||
teq r0, #0x0 @ '0' on actual UP A9 hardware
|
||||
beq __fixup_smp_on_up @ So its an A9 UP
|
||||
ldr r0, [r0, #4] @ read SCU Config
|
||||
and r0, r0, #0x3 @ number of CPUs
|
||||
teq r0, #0x0 @ is 1?
|
||||
movne pc, lr
|
||||
|
||||
__fixup_smp_on_up:
|
||||
adr r0, 1f
|
||||
|
|
|
@ -129,6 +129,24 @@ DT_MACHINE_START(OMAP3_DT, "Generic OMAP3 (Flattened Device Tree)")
|
|||
.restart = omap3xxx_restart,
|
||||
MACHINE_END
|
||||
|
||||
static const char *omap36xx_boards_compat[] __initdata = {
|
||||
"ti,omap36xx",
|
||||
NULL,
|
||||
};
|
||||
|
||||
DT_MACHINE_START(OMAP36XX_DT, "Generic OMAP36xx (Flattened Device Tree)")
|
||||
.reserve = omap_reserve,
|
||||
.map_io = omap3_map_io,
|
||||
.init_early = omap3630_init_early,
|
||||
.init_irq = omap_intc_of_init,
|
||||
.handle_irq = omap3_intc_handle_irq,
|
||||
.init_machine = omap_generic_init,
|
||||
.init_late = omap3_init_late,
|
||||
.init_time = omap3_sync32k_timer_init,
|
||||
.dt_compat = omap36xx_boards_compat,
|
||||
.restart = omap3xxx_restart,
|
||||
MACHINE_END
|
||||
|
||||
static const char *omap3_gp_boards_compat[] __initdata = {
|
||||
"ti,omap3-beagle",
|
||||
"timll,omap3-devkit8000",
|
||||
|
|
|
@ -167,38 +167,47 @@ static struct lp55xx_led_config rx51_lp5523_led_config[] = {
|
|||
.name = "lp5523:kb1",
|
||||
.chan_nr = 0,
|
||||
.led_current = 50,
|
||||
.max_current = 100,
|
||||
}, {
|
||||
.name = "lp5523:kb2",
|
||||
.chan_nr = 1,
|
||||
.led_current = 50,
|
||||
.max_current = 100,
|
||||
}, {
|
||||
.name = "lp5523:kb3",
|
||||
.chan_nr = 2,
|
||||
.led_current = 50,
|
||||
.max_current = 100,
|
||||
}, {
|
||||
.name = "lp5523:kb4",
|
||||
.chan_nr = 3,
|
||||
.led_current = 50,
|
||||
.max_current = 100,
|
||||
}, {
|
||||
.name = "lp5523:b",
|
||||
.chan_nr = 4,
|
||||
.led_current = 50,
|
||||
.max_current = 100,
|
||||
}, {
|
||||
.name = "lp5523:g",
|
||||
.chan_nr = 5,
|
||||
.led_current = 50,
|
||||
.max_current = 100,
|
||||
}, {
|
||||
.name = "lp5523:r",
|
||||
.chan_nr = 6,
|
||||
.led_current = 50,
|
||||
.max_current = 100,
|
||||
}, {
|
||||
.name = "lp5523:kb5",
|
||||
.chan_nr = 7,
|
||||
.led_current = 50,
|
||||
.max_current = 100,
|
||||
}, {
|
||||
.name = "lp5523:kb6",
|
||||
.chan_nr = 8,
|
||||
.led_current = 50,
|
||||
.max_current = 100,
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@ -272,9 +272,19 @@ static int omap2_onenand_setup_async(void __iomem *onenand_base)
|
|||
struct gpmc_timings t;
|
||||
int ret;
|
||||
|
||||
if (gpmc_onenand_data->of_node)
|
||||
if (gpmc_onenand_data->of_node) {
|
||||
gpmc_read_settings_dt(gpmc_onenand_data->of_node,
|
||||
&onenand_async);
|
||||
if (onenand_async.sync_read || onenand_async.sync_write) {
|
||||
if (onenand_async.sync_write)
|
||||
gpmc_onenand_data->flags |=
|
||||
ONENAND_SYNC_READWRITE;
|
||||
else
|
||||
gpmc_onenand_data->flags |= ONENAND_SYNC_READ;
|
||||
onenand_async.sync_read = false;
|
||||
onenand_async.sync_write = false;
|
||||
}
|
||||
}
|
||||
|
||||
omap2_onenand_set_async_mode(onenand_base);
|
||||
|
||||
|
|
|
@ -28,7 +28,7 @@
|
|||
#define OMAP_PULL_UP (1 << 4)
|
||||
#define OMAP_ALTELECTRICALSEL (1 << 5)
|
||||
|
||||
/* 34xx specific mux bit defines */
|
||||
/* omap3/4/5 specific mux bit defines */
|
||||
#define OMAP_INPUT_EN (1 << 8)
|
||||
#define OMAP_OFF_EN (1 << 9)
|
||||
#define OMAP_OFFOUT_EN (1 << 10)
|
||||
|
@ -36,8 +36,6 @@
|
|||
#define OMAP_OFF_PULL_EN (1 << 12)
|
||||
#define OMAP_OFF_PULL_UP (1 << 13)
|
||||
#define OMAP_WAKEUP_EN (1 << 14)
|
||||
|
||||
/* 44xx specific mux bit defines */
|
||||
#define OMAP_WAKEUP_EVENT (1 << 15)
|
||||
|
||||
/* Active pin states */
|
||||
|
|
|
@ -628,7 +628,7 @@ void __init omap4_local_timer_init(void)
|
|||
#endif /* CONFIG_HAVE_ARM_TWD */
|
||||
#endif /* CONFIG_ARCH_OMAP4 */
|
||||
|
||||
#ifdef CONFIG_SOC_OMAP5
|
||||
#if defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX)
|
||||
void __init omap5_realtime_timer_init(void)
|
||||
{
|
||||
omap4_sync32k_timer_init();
|
||||
|
@ -636,7 +636,7 @@ void __init omap5_realtime_timer_init(void)
|
|||
|
||||
clocksource_of_init();
|
||||
}
|
||||
#endif /* CONFIG_SOC_OMAP5 */
|
||||
#endif /* CONFIG_SOC_OMAP5 || CONFIG_SOC_DRA7XX */
|
||||
|
||||
/**
|
||||
* omap_timer_init - build and register timer device with an
|
||||
|
|
|
@ -1232,7 +1232,8 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
|
|||
break;
|
||||
|
||||
len = (j - i) << PAGE_SHIFT;
|
||||
ret = iommu_map(mapping->domain, iova, phys, len, 0);
|
||||
ret = iommu_map(mapping->domain, iova, phys, len,
|
||||
IOMMU_READ|IOMMU_WRITE);
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
iova += len;
|
||||
|
@ -1431,6 +1432,27 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
|
|||
GFP_KERNEL);
|
||||
}
|
||||
|
||||
static int __dma_direction_to_prot(enum dma_data_direction dir)
|
||||
{
|
||||
int prot;
|
||||
|
||||
switch (dir) {
|
||||
case DMA_BIDIRECTIONAL:
|
||||
prot = IOMMU_READ | IOMMU_WRITE;
|
||||
break;
|
||||
case DMA_TO_DEVICE:
|
||||
prot = IOMMU_READ;
|
||||
break;
|
||||
case DMA_FROM_DEVICE:
|
||||
prot = IOMMU_WRITE;
|
||||
break;
|
||||
default:
|
||||
prot = 0;
|
||||
}
|
||||
|
||||
return prot;
|
||||
}
|
||||
|
||||
/*
|
||||
* Map a part of the scatter-gather list into contiguous io address space
|
||||
*/
|
||||
|
@ -1444,6 +1466,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
|
|||
int ret = 0;
|
||||
unsigned int count;
|
||||
struct scatterlist *s;
|
||||
int prot;
|
||||
|
||||
size = PAGE_ALIGN(size);
|
||||
*handle = DMA_ERROR_CODE;
|
||||
|
@ -1460,7 +1483,9 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
|
|||
!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
|
||||
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
|
||||
|
||||
ret = iommu_map(mapping->domain, iova, phys, len, 0);
|
||||
prot = __dma_direction_to_prot(dir);
|
||||
|
||||
ret = iommu_map(mapping->domain, iova, phys, len, prot);
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
count += len >> PAGE_SHIFT;
|
||||
|
@ -1665,19 +1690,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
|
|||
if (dma_addr == DMA_ERROR_CODE)
|
||||
return dma_addr;
|
||||
|
||||
switch (dir) {
|
||||
case DMA_BIDIRECTIONAL:
|
||||
prot = IOMMU_READ | IOMMU_WRITE;
|
||||
break;
|
||||
case DMA_TO_DEVICE:
|
||||
prot = IOMMU_READ;
|
||||
break;
|
||||
case DMA_FROM_DEVICE:
|
||||
prot = IOMMU_WRITE;
|
||||
break;
|
||||
default:
|
||||
prot = 0;
|
||||
}
|
||||
prot = __dma_direction_to_prot(dir);
|
||||
|
||||
ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
|
||||
if (ret < 0)
|
||||
|
|
|
@ -17,7 +17,6 @@
|
|||
#include <linux/nodemask.h>
|
||||
#include <linux/initrd.h>
|
||||
#include <linux/of_fdt.h>
|
||||
#include <linux/of_reserved_mem.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/memblock.h>
|
||||
|
@ -379,8 +378,6 @@ void __init arm_memblock_init(struct meminfo *mi,
|
|||
if (mdesc->reserve)
|
||||
mdesc->reserve();
|
||||
|
||||
early_init_dt_scan_reserved_mem();
|
||||
|
||||
/*
|
||||
* reserve memory for DMA contigouos allocations,
|
||||
* must come from DMA area inside low memory
|
||||
|
|
|
@ -930,4 +930,5 @@ void bpf_jit_free(struct sk_filter *fp)
|
|||
{
|
||||
if (fp->bpf_func != sk_run_filter)
|
||||
module_free(NULL, fp->bpf_func);
|
||||
kfree(fp);
|
||||
}
|
||||
|
|
|
@ -73,7 +73,7 @@
|
|||
3:
|
||||
|
||||
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
|
||||
PTR_L t8, __stack_chk_guard
|
||||
PTR_LA t8, __stack_chk_guard
|
||||
LONG_L t9, TASK_STACK_CANARY(a1)
|
||||
LONG_S t9, 0(t8)
|
||||
#endif
|
||||
|
|
|
@ -67,7 +67,7 @@ LEAF(resume)
|
|||
1:
|
||||
|
||||
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
|
||||
PTR_L t8, __stack_chk_guard
|
||||
PTR_LA t8, __stack_chk_guard
|
||||
LONG_L t9, TASK_STACK_CANARY(a1)
|
||||
LONG_S t9, 0(t8)
|
||||
#endif
|
||||
|
|
|
@ -69,7 +69,7 @@
|
|||
1:
|
||||
|
||||
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
|
||||
PTR_L t8, __stack_chk_guard
|
||||
PTR_LA t8, __stack_chk_guard
|
||||
LONG_L t9, TASK_STACK_CANARY(a1)
|
||||
LONG_S t9, 0(t8)
|
||||
#endif
|
||||
|
|
|
@ -40,6 +40,8 @@ CONFIG_IP_NF_QUEUE=m
|
|||
CONFIG_LLC2=m
|
||||
CONFIG_NET_PKTGEN=m
|
||||
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
|
||||
CONFIG_DEVTMPFS=y
|
||||
CONFIG_DEVTMPFS_MOUNT=y
|
||||
# CONFIG_STANDALONE is not set
|
||||
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
|
||||
CONFIG_PARPORT=y
|
||||
|
|
|
@ -79,6 +79,8 @@ CONFIG_IP_DCCP=m
|
|||
CONFIG_LLC2=m
|
||||
CONFIG_NET_PKTGEN=m
|
||||
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
|
||||
CONFIG_DEVTMPFS=y
|
||||
CONFIG_DEVTMPFS_MOUNT=y
|
||||
# CONFIG_STANDALONE is not set
|
||||
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
|
||||
CONFIG_BLK_DEV_UMEM=m
|
||||
|
|
|
@ -4,6 +4,7 @@ CONFIG_IKCONFIG=y
|
|||
CONFIG_IKCONFIG_PROC=y
|
||||
CONFIG_LOG_BUF_SHIFT=16
|
||||
CONFIG_SYSFS_DEPRECATED_V2=y
|
||||
CONFIG_BLK_DEV_INITRD=y
|
||||
CONFIG_SLAB=y
|
||||
CONFIG_MODULES=y
|
||||
CONFIG_MODVERSIONS=y
|
||||
|
@ -27,6 +28,8 @@ CONFIG_IP_PNP_BOOTP=y
|
|||
# CONFIG_INET_LRO is not set
|
||||
CONFIG_IPV6=y
|
||||
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
|
||||
CONFIG_DEVTMPFS=y
|
||||
CONFIG_DEVTMPFS_MOUNT=y
|
||||
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
|
||||
CONFIG_PARPORT=y
|
||||
CONFIG_PARPORT_PC=y
|
||||
|
|
|
@ -5,6 +5,7 @@ CONFIG_IKCONFIG=y
|
|||
CONFIG_IKCONFIG_PROC=y
|
||||
CONFIG_LOG_BUF_SHIFT=16
|
||||
CONFIG_SYSFS_DEPRECATED_V2=y
|
||||
CONFIG_BLK_DEV_INITRD=y
|
||||
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
|
||||
CONFIG_EXPERT=y
|
||||
CONFIG_KALLSYMS_ALL=y
|
||||
|
@ -39,6 +40,8 @@ CONFIG_NETFILTER_DEBUG=y
|
|||
CONFIG_IP_NF_QUEUE=m
|
||||
CONFIG_NET_PKTGEN=m
|
||||
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
|
||||
CONFIG_DEVTMPFS=y
|
||||
CONFIG_DEVTMPFS_MOUNT=y
|
||||
# CONFIG_STANDALONE is not set
|
||||
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
|
||||
CONFIG_BLK_DEV_UMEM=m
|
||||
|
|
|
@ -62,6 +62,8 @@ CONFIG_TIPC=m
|
|||
CONFIG_LLC2=m
|
||||
CONFIG_DNS_RESOLVER=y
|
||||
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
|
||||
CONFIG_DEVTMPFS=y
|
||||
CONFIG_DEVTMPFS_MOUNT=y
|
||||
# CONFIG_STANDALONE is not set
|
||||
CONFIG_PARPORT=y
|
||||
CONFIG_PARPORT_PC=y
|
||||
|
|
|
@ -49,6 +49,8 @@ CONFIG_INET6_ESP=y
|
|||
CONFIG_INET6_IPCOMP=y
|
||||
CONFIG_LLC2=m
|
||||
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
|
||||
CONFIG_DEVTMPFS=y
|
||||
CONFIG_DEVTMPFS_MOUNT=y
|
||||
# CONFIG_STANDALONE is not set
|
||||
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
|
||||
CONFIG_PARPORT=y
|
||||
|
|
|
@ -6,7 +6,7 @@ struct pt_regs;
|
|||
|
||||
/* traps.c */
|
||||
void parisc_terminate(char *msg, struct pt_regs *regs,
|
||||
int code, unsigned long offset);
|
||||
int code, unsigned long offset) __noreturn __cold;
|
||||
|
||||
/* mm/fault.c */
|
||||
void do_page_fault(struct pt_regs *regs, unsigned long code,
|
||||
|
|
|
@ -195,6 +195,8 @@ common_stext:
|
|||
ldw MEM_PDC_HI(%r0),%r6
|
||||
depd %r6, 31, 32, %r3 /* move to upper word */
|
||||
|
||||
mfctl %cr30,%r6 /* PCX-W2 firmware bug */
|
||||
|
||||
ldo PDC_PSW(%r0),%arg0 /* 21 */
|
||||
ldo PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
|
||||
ldo PDC_PSW_WIDE_BIT(%r0),%arg2 /* 2 */
|
||||
|
@ -203,6 +205,8 @@ common_stext:
|
|||
copy %r0,%arg3
|
||||
|
||||
stext_pdc_ret:
|
||||
mtctl %r6,%cr30 /* restore task thread info */
|
||||
|
||||
/* restore rfi target address*/
|
||||
ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10
|
||||
tophys_r1 %r10
|
||||
|
|
|
@ -72,7 +72,6 @@ enum ipi_message_type {
|
|||
IPI_NOP=0,
|
||||
IPI_RESCHEDULE=1,
|
||||
IPI_CALL_FUNC,
|
||||
IPI_CALL_FUNC_SINGLE,
|
||||
IPI_CPU_START,
|
||||
IPI_CPU_STOP,
|
||||
IPI_CPU_TEST
|
||||
|
@ -164,11 +163,6 @@ ipi_interrupt(int irq, void *dev_id)
|
|||
generic_smp_call_function_interrupt();
|
||||
break;
|
||||
|
||||
case IPI_CALL_FUNC_SINGLE:
|
||||
smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC_SINGLE\n", this_cpu);
|
||||
generic_smp_call_function_single_interrupt();
|
||||
break;
|
||||
|
||||
case IPI_CPU_START:
|
||||
smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);
|
||||
break;
|
||||
|
@ -260,7 +254,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
|
|||
|
||||
void arch_send_call_function_single_ipi(int cpu)
|
||||
{
|
||||
send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
|
||||
send_IPI_single(cpu, IPI_CALL_FUNC);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -291,11 +291,6 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
|
|||
do_exit(SIGSEGV);
|
||||
}
|
||||
|
||||
int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
|
||||
{
|
||||
return syscall(regs);
|
||||
}
|
||||
|
||||
/* gdb uses break 4,8 */
|
||||
#define GDB_BREAK_INSN 0x10004
|
||||
static void handle_gdb_break(struct pt_regs *regs, int wot)
|
||||
|
@ -805,14 +800,14 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
|
|||
else {
|
||||
|
||||
/*
|
||||
* The kernel should never fault on its own address space.
|
||||
* The kernel should never fault on its own address space,
|
||||
* unless pagefault_disable() was called before.
|
||||
*/
|
||||
|
||||
if (fault_space == 0)
|
||||
if (fault_space == 0 && !in_atomic())
|
||||
{
|
||||
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
|
||||
parisc_terminate("Kernel Fault", regs, code, fault_address);
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -56,7 +56,7 @@
|
|||
#ifdef __KERNEL__
|
||||
#include <linux/module.h>
|
||||
#include <linux/compiler.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <linux/uaccess.h>
|
||||
#define s_space "%%sr1"
|
||||
#define d_space "%%sr2"
|
||||
#else
|
||||
|
@ -524,4 +524,17 @@ EXPORT_SYMBOL(copy_to_user);
|
|||
EXPORT_SYMBOL(copy_from_user);
|
||||
EXPORT_SYMBOL(copy_in_user);
|
||||
EXPORT_SYMBOL(memcpy);
|
||||
|
||||
long probe_kernel_read(void *dst, const void *src, size_t size)
|
||||
{
|
||||
unsigned long addr = (unsigned long)src;
|
||||
|
||||
if (size < 0 || addr < PAGE_SIZE)
|
||||
return -EFAULT;
|
||||
|
||||
/* check for I/O space F_EXTEND(0xfff00000) access as well? */
|
||||
|
||||
return __probe_kernel_read(dst, src, size);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@ -171,20 +171,25 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
|
|||
unsigned long address)
|
||||
{
|
||||
struct vm_area_struct *vma, *prev_vma;
|
||||
struct task_struct *tsk = current;
|
||||
struct mm_struct *mm = tsk->mm;
|
||||
struct task_struct *tsk;
|
||||
struct mm_struct *mm;
|
||||
unsigned long acc_type;
|
||||
int fault;
|
||||
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
|
||||
unsigned int flags;
|
||||
|
||||
if (in_atomic() || !mm)
|
||||
if (in_atomic())
|
||||
goto no_context;
|
||||
|
||||
tsk = current;
|
||||
mm = tsk->mm;
|
||||
if (!mm)
|
||||
goto no_context;
|
||||
|
||||
flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
|
||||
if (user_mode(regs))
|
||||
flags |= FAULT_FLAG_USER;
|
||||
|
||||
acc_type = parisc_acctyp(code, regs->iir);
|
||||
|
||||
if (acc_type & VM_WRITE)
|
||||
flags |= FAULT_FLAG_WRITE;
|
||||
retry:
|
||||
|
|
|
@ -691,4 +691,5 @@ void bpf_jit_free(struct sk_filter *fp)
|
|||
{
|
||||
if (fp->bpf_func != sk_run_filter)
|
||||
module_free(NULL, fp->bpf_func);
|
||||
kfree(fp);
|
||||
}
|
||||
|
|
|
@ -748,7 +748,9 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
|
|||
|
||||
static inline void pgste_set_pte(pte_t *ptep, pte_t entry)
|
||||
{
|
||||
if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_WRITE)) {
|
||||
if (!MACHINE_HAS_ESOP &&
|
||||
(pte_val(entry) & _PAGE_PRESENT) &&
|
||||
(pte_val(entry) & _PAGE_WRITE)) {
|
||||
/*
|
||||
* Without enhanced suppression-on-protection force
|
||||
* the dirty bit on for all writable ptes.
|
||||
|
|
|
@ -71,30 +71,30 @@ static inline void local_tick_enable(unsigned long long comp)
|
|||
|
||||
typedef unsigned long long cycles_t;
|
||||
|
||||
static inline unsigned long long get_tod_clock(void)
|
||||
{
|
||||
unsigned long long clk;
|
||||
|
||||
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
|
||||
asm volatile(".insn s,0xb27c0000,%0" : "=Q" (clk) : : "cc");
|
||||
#else
|
||||
asm volatile("stck %0" : "=Q" (clk) : : "cc");
|
||||
#endif
|
||||
return clk;
|
||||
}
|
||||
|
||||
static inline void get_tod_clock_ext(char *clk)
|
||||
{
|
||||
asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
|
||||
}
|
||||
|
||||
static inline unsigned long long get_tod_clock_xt(void)
|
||||
static inline unsigned long long get_tod_clock(void)
|
||||
{
|
||||
unsigned char clk[16];
|
||||
get_tod_clock_ext(clk);
|
||||
return *((unsigned long long *)&clk[1]);
|
||||
}
|
||||
|
||||
static inline unsigned long long get_tod_clock_fast(void)
|
||||
{
|
||||
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
|
||||
unsigned long long clk;
|
||||
|
||||
asm volatile("stckf %0" : "=Q" (clk) : : "cc");
|
||||
return clk;
|
||||
#else
|
||||
return get_tod_clock();
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline cycles_t get_cycles(void)
|
||||
{
|
||||
return (cycles_t) get_tod_clock() >> 2;
|
||||
|
@ -125,7 +125,7 @@ extern u64 sched_clock_base_cc;
|
|||
*/
|
||||
static inline unsigned long long get_tod_clock_monotonic(void)
|
||||
{
|
||||
return get_tod_clock_xt() - sched_clock_base_cc;
|
||||
return get_tod_clock() - sched_clock_base_cc;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -99,7 +99,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
|
|||
break;
|
||||
}
|
||||
}
|
||||
return err;
|
||||
return err ? -EFAULT : 0;
|
||||
}
|
||||
|
||||
int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
|
||||
|
@ -148,7 +148,7 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
|
|||
break;
|
||||
}
|
||||
}
|
||||
return err;
|
||||
return err ? -EFAULT : 0;
|
||||
}
|
||||
|
||||
static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
|
||||
|
|
|
@ -867,7 +867,7 @@ static inline void
|
|||
debug_finish_entry(debug_info_t * id, debug_entry_t* active, int level,
|
||||
int exception)
|
||||
{
|
||||
active->id.stck = get_tod_clock();
|
||||
active->id.stck = get_tod_clock_fast();
|
||||
active->id.fields.cpuid = smp_processor_id();
|
||||
active->caller = __builtin_return_address(0);
|
||||
active->id.fields.exception = exception;
|
||||
|
|
|
@ -385,7 +385,7 @@ static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
|
|||
}
|
||||
|
||||
if ((!rc) && (vcpu->arch.sie_block->ckc <
|
||||
get_tod_clock() + vcpu->arch.sie_block->epoch)) {
|
||||
get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) {
|
||||
if ((!psw_extint_disabled(vcpu)) &&
|
||||
(vcpu->arch.sie_block->gcr[0] & 0x800ul))
|
||||
rc = 1;
|
||||
|
@ -425,7 +425,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
|
|||
goto no_timer;
|
||||
}
|
||||
|
||||
now = get_tod_clock() + vcpu->arch.sie_block->epoch;
|
||||
now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
|
||||
if (vcpu->arch.sie_block->ckc < now) {
|
||||
__unset_cpu_idle(vcpu);
|
||||
return 0;
|
||||
|
@ -515,7 +515,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
|
|||
}
|
||||
|
||||
if ((vcpu->arch.sie_block->ckc <
|
||||
get_tod_clock() + vcpu->arch.sie_block->epoch))
|
||||
get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
|
||||
__try_deliver_ckc_interrupt(vcpu);
|
||||
|
||||
if (atomic_read(&fi->active)) {
|
||||
|
|
|
@ -44,7 +44,7 @@ static void __udelay_disabled(unsigned long long usecs)
|
|||
do {
|
||||
set_clock_comparator(end);
|
||||
vtime_stop_cpu();
|
||||
} while (get_tod_clock() < end);
|
||||
} while (get_tod_clock_fast() < end);
|
||||
lockdep_on();
|
||||
__ctl_load(cr0, 0, 0);
|
||||
__ctl_load(cr6, 6, 6);
|
||||
|
@ -55,7 +55,7 @@ static void __udelay_enabled(unsigned long long usecs)
|
|||
{
|
||||
u64 clock_saved, end;
|
||||
|
||||
end = get_tod_clock() + (usecs << 12);
|
||||
end = get_tod_clock_fast() + (usecs << 12);
|
||||
do {
|
||||
clock_saved = 0;
|
||||
if (end < S390_lowcore.clock_comparator) {
|
||||
|
@ -65,7 +65,7 @@ static void __udelay_enabled(unsigned long long usecs)
|
|||
vtime_stop_cpu();
|
||||
if (clock_saved)
|
||||
local_tick_enable(clock_saved);
|
||||
} while (get_tod_clock() < end);
|
||||
} while (get_tod_clock_fast() < end);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -109,8 +109,8 @@ void udelay_simple(unsigned long long usecs)
|
|||
{
|
||||
u64 end;
|
||||
|
||||
end = get_tod_clock() + (usecs << 12);
|
||||
while (get_tod_clock() < end)
|
||||
end = get_tod_clock_fast() + (usecs << 12);
|
||||
while (get_tod_clock_fast() < end)
|
||||
cpu_relax();
|
||||
}
|
||||
|
||||
|
@ -120,10 +120,10 @@ void __ndelay(unsigned long long nsecs)
|
|||
|
||||
nsecs <<= 9;
|
||||
do_div(nsecs, 125);
|
||||
end = get_tod_clock() + nsecs;
|
||||
end = get_tod_clock_fast() + nsecs;
|
||||
if (nsecs & ~0xfffUL)
|
||||
__udelay(nsecs >> 12);
|
||||
while (get_tod_clock() < end)
|
||||
while (get_tod_clock_fast() < end)
|
||||
barrier();
|
||||
}
|
||||
EXPORT_SYMBOL(__ndelay);
|
||||
|
|
|
@ -881,7 +881,9 @@ void bpf_jit_free(struct sk_filter *fp)
|
|||
struct bpf_binary_header *header = (void *)addr;
|
||||
|
||||
if (fp->bpf_func == sk_run_filter)
|
||||
return;
|
||||
goto free_filter;
|
||||
set_memory_rw(addr, header->pages);
|
||||
module_free(NULL, header);
|
||||
free_filter:
|
||||
kfree(fp);
|
||||
}
|
||||
|
|
|
@ -808,4 +808,5 @@ void bpf_jit_free(struct sk_filter *fp)
|
|||
{
|
||||
if (fp->bpf_func != sk_run_filter)
|
||||
module_free(NULL, fp->bpf_func);
|
||||
kfree(fp);
|
||||
}
|
||||
|
|
|
@@ -40,9 +40,11 @@ static ssize_t exitcode_proc_write(struct file *file,
 		const char __user *buffer, size_t count, loff_t *pos)
 {
 	char *end, buf[sizeof("nnnnn\0")];
+	size_t size;
 	int tmp;
 
-	if (copy_from_user(buf, buffer, count))
+	size = min(count, sizeof(buf));
+	if (copy_from_user(buf, buffer, size))
 		return -EFAULT;
 
 	tmp = simple_strtol(buf, &end, 0);

@@ -860,7 +860,7 @@ source "kernel/Kconfig.preempt"
 
 config X86_UP_APIC
 	bool "Local APIC support on uniprocessors"
-	depends on X86_32 && !SMP && !X86_32_NON_STANDARD
+	depends on X86_32 && !SMP && !X86_32_NON_STANDARD && !PCI_MSI
 	---help---
 	  A local APIC (Advanced Programmable Interrupt Controller) is an
 	  integrated interrupt controller in the CPU. If you have a single-CPU
@@ -885,11 +885,11 @@ config X86_UP_IOAPIC
 
 config X86_LOCAL_APIC
 	def_bool y
-	depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC
+	depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC || PCI_MSI
 
 config X86_IO_APIC
 	def_bool y
-	depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC
+	depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC || PCI_MSI
 
 config X86_VISWS_APIC
 	def_bool y
@@ -1033,6 +1033,7 @@ config X86_REBOOTFIXUPS
 
 config MICROCODE
 	tristate "CPU microcode loading support"
+	depends on CPU_SUP_AMD || CPU_SUP_INTEL
 	select FW_LOADER
 	---help---
 

@@ -128,7 +128,8 @@ do { \
 do { \
 	typedef typeof(var) pao_T__; \
 	const int pao_ID__ = (__builtin_constant_p(val) && \
-			      ((val) == 1 || (val) == -1)) ? (val) : 0; \
+			      ((val) == 1 || (val) == -1)) ? \
+				(int)(val) : 0; \
 	if (0) { \
 		pao_T__ pao_tmp__; \
 		pao_tmp__ = (val); \

@@ -113,7 +113,7 @@ static int __init early_get_pnodeid(void)
 		break;
 	case UV3_HUB_PART_NUMBER:
 	case UV3_HUB_PART_NUMBER_X:
-		uv_min_hub_revision_id += UV3_HUB_REVISION_BASE - 1;
+		uv_min_hub_revision_id += UV3_HUB_REVISION_BASE;
 		break;
 	}
 

@@ -609,7 +609,7 @@ static struct dentry *d_kvm_debug;
 
 struct dentry *kvm_init_debugfs(void)
 {
-	d_kvm_debug = debugfs_create_dir("kvm", NULL);
+	d_kvm_debug = debugfs_create_dir("kvm-guest", NULL);
 	if (!d_kvm_debug)
 		printk(KERN_WARNING "Could not create 'kvm' debugfs directory\n");
 
@@ -775,11 +775,22 @@ void __init kvm_spinlock_init(void)
 	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
 		return;
 
-	printk(KERN_INFO "KVM setup paravirtual spinlock\n");
-
-	static_key_slow_inc(&paravirt_ticketlocks_enabled);
-
 	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
 	pv_lock_ops.unlock_kick = kvm_unlock_kick;
 }
 
+static __init int kvm_spinlock_init_jump(void)
+{
+	if (!kvm_para_available())
+		return 0;
+	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
+		return 0;
+
+	static_key_slow_inc(&paravirt_ticketlocks_enabled);
+	printk(KERN_INFO "KVM setup paravirtual spinlock\n");
+
+	return 0;
+}
+early_initcall(kvm_spinlock_init_jump);
+
 #endif /* CONFIG_PARAVIRT_SPINLOCKS */

@@ -326,6 +326,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"),
 		},
 	},
+	{	/* Handle problems with rebooting on the Latitude E5410. */
+		.callback = set_pci_reboot,
+		.ident = "Dell Latitude E5410",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5410"),
+		},
+	},
 	{	/* Handle problems with rebooting on the Latitude E5420. */
 		.callback = set_pci_reboot,
 		.ident = "Dell Latitude E5420",

@@ -772,13 +772,21 @@ out:
 	return;
 }
 
+static void bpf_jit_free_deferred(struct work_struct *work)
+{
+	struct sk_filter *fp = container_of(work, struct sk_filter, work);
+	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
+	struct bpf_binary_header *header = (void *)addr;
+
+	set_memory_rw(addr, header->pages);
+	module_free(NULL, header);
+	kfree(fp);
+}
+
 void bpf_jit_free(struct sk_filter *fp)
 {
 	if (fp->bpf_func != sk_run_filter) {
-		unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
-		struct bpf_binary_header *header = (void *)addr;
-
-		set_memory_rw(addr, header->pages);
-		module_free(NULL, header);
+		INIT_WORK(&fp->work, bpf_jit_free_deferred);
+		schedule_work(&fp->work);
 	}
 }

@@ -278,6 +278,15 @@ static void __init xen_smp_prepare_boot_cpu(void)
 	   old memory can be recycled */
 	make_lowmem_page_readwrite(xen_initial_gdt);
 
+#ifdef CONFIG_X86_32
+	/*
+	 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
+	 * expects __USER_DS
+	 */
+	loadsegment(ds, __USER_DS);
+	loadsegment(es, __USER_DS);
+#endif
+
 	xen_filter_cpu_maps();
 	xen_setup_vcpu_info_placement();
 }

@@ -1122,7 +1122,7 @@ ENDPROC(fast_syscall_spill_registers)
  * a3: exctable, original value in excsave1
  */
 
-fast_syscall_spill_registers_fixup:
+ENTRY(fast_syscall_spill_registers_fixup)
 
 	rsr	a2, windowbase	# get current windowbase (a2 is saved)
 	xsr	a0, depc	# restore depc and a0
@@ -1134,22 +1134,26 @@ fast_syscall_spill_registers_fixup:
 	 */
 
 	xsr	a3, excsave1	# get spill-mask
-	slli	a2, a3, 1	# shift left by one
+	slli	a3, a3, 1	# shift left by one
 
-	slli	a3, a2, 32-WSBITS
-	src	a2, a2, a3	# a1 = xxwww1yyxxxwww1yy......
+	slli	a2, a3, 32-WSBITS
+	src	a2, a3, a2	# a2 = xxwww1yyxxxwww1yy......
 	wsr	a2, windowstart	# set corrected windowstart
 
-	rsr	a3, excsave1
-	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE	# restore a2
-	l32i	a3, a3, EXC_TABLE_PARAM	# original WB (in user task)
-	srli	a3, a3, 1
+	rsr	a2, excsave1
+	l32i	a2, a2, EXC_TABLE_DOUBLE_SAVE	# restore a2
+	xsr	a2, excsave1
+	s32i	a3, a2, EXC_TABLE_DOUBLE_SAVE	# save a3
+	l32i	a3, a2, EXC_TABLE_PARAM	# original WB (in user task)
+	xsr	a2, excsave1
 
 	/* Return to the original (user task) WINDOWBASE.
 	 * We leave the following frame behind:
 	 * a0, a1, a2 same
-	 * a3: trashed (saved in excsave_1)
+	 * a3: trashed (saved in EXC_TABLE_DOUBLE_SAVE)
 	 * depc: depc (we have to return to that address)
-	 * excsave_1: a3
+	 * excsave_1: exctable
 	 */
 
 	wsr	a3, windowbase
@@ -1159,9 +1163,9 @@ fast_syscall_spill_registers_fixup:
 	 * a0: return address
 	 * a1: used, stack pointer
 	 * a2: kernel stack pointer
-	 * a3: available, saved in EXCSAVE_1
+	 * a3: available
 	 * depc: exception address
-	 * excsave: a3
+	 * excsave: exctable
 	 * Note: This frame might be the same as above.
 	 */
 
@@ -1181,9 +1185,12 @@ fast_syscall_spill_registers_fixup:
 	rsr	a0, exccause
 	addx4	a0, a0, a3			# find entry in table
 	l32i	a0, a0, EXC_TABLE_FAST_USER	# load handler
+	l32i	a3, a3, EXC_TABLE_DOUBLE_SAVE
 	jx	a0
 
-fast_syscall_spill_registers_fixup_return:
+ENDPROC(fast_syscall_spill_registers_fixup)
+
+ENTRY(fast_syscall_spill_registers_fixup_return)
 
 	/* When we return here, all registers have been restored (a2: DEPC) */
 
@@ -1191,13 +1198,13 @@ fast_syscall_spill_registers_fixup_return:
 
 	/* Restore fixup handler. */
 
-	xsr	a3, excsave1
-	movi	a2, fast_syscall_spill_registers_fixup
-	s32i	a2, a3, EXC_TABLE_FIXUP
-	s32i	a0, a3, EXC_TABLE_DOUBLE_SAVE
-	rsr	a2, windowbase
-	s32i	a2, a3, EXC_TABLE_PARAM
-	l32i	a2, a3, EXC_TABLE_KSTK
+	rsr	a2, excsave1
+	s32i	a3, a2, EXC_TABLE_DOUBLE_SAVE
+	movi	a3, fast_syscall_spill_registers_fixup
+	s32i	a3, a2, EXC_TABLE_FIXUP
+	rsr	a3, windowbase
+	s32i	a3, a2, EXC_TABLE_PARAM
+	l32i	a2, a2, EXC_TABLE_KSTK
 
 	/* Load WB at the time the exception occurred. */
 
@@ -1206,8 +1213,12 @@ fast_syscall_spill_registers_fixup_return:
 	wsr	a3, windowbase
 	rsync
 
+	rsr	a3, excsave1
+	l32i	a3, a3, EXC_TABLE_DOUBLE_SAVE
+
 	rfde
 
+ENDPROC(fast_syscall_spill_registers_fixup_return)
+
 /*
  * spill all registers.

@@ -341,7 +341,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 
 	sp = regs->areg[1];
 
-	if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp)) {
+	if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && sas_ss_flags(sp) == 0) {
 		sp = current->sas_ss_sp + current->sas_ss_size;
 	}
 

@@ -737,7 +737,8 @@ static int __init iss_net_setup(char *str)
 		return 1;
 	}
 
-	if ((new = alloc_bootmem(sizeof new)) == NULL) {
+	new = alloc_bootmem(sizeof(*new));
+	if (new == NULL) {
 		printk("Alloc_bootmem failed\n");
 		return 1;
 	}

@ -222,11 +222,16 @@ check_hybrid:
|
|||
* the disk size.
|
||||
*
|
||||
* Hybrid MBRs do not necessarily comply with this.
|
||||
*
|
||||
* Consider a bad value here to be a warning to support dd'ing
|
||||
* an image from a smaller disk to a larger disk.
|
||||
*/
|
||||
if (ret == GPT_MBR_PROTECTIVE) {
|
||||
sz = le32_to_cpu(mbr->partition_record[part].size_in_lba);
|
||||
if (sz != (uint32_t) total_sectors - 1 && sz != 0xFFFFFFFF)
|
||||
ret = 0;
|
||||
pr_debug("GPT: mbr size in lba (%u) different than whole disk (%u).\n",
|
||||
sz, min_t(uint32_t,
|
||||
total_sectors - 1, 0xFFFFFFFF));
|
||||
}
|
||||
done:
|
||||
return ret;
|
||||
|
|
|
@ -24,7 +24,7 @@ menuconfig ACPI
|
|||
are configured, ACPI is used.
|
||||
|
||||
The project home page for the Linux ACPI subsystem is here:
|
||||
<http://www.lesswatts.org/projects/acpi/>
|
||||
<https://01.org/linux-acpi>
|
||||
|
||||
Linux support for ACPI is based on Intel Corporation's ACPI
|
||||
Component Architecture (ACPI CA). For more information on the
|
||||
|
@ -123,9 +123,9 @@ config ACPI_BUTTON
|
|||
default y
|
||||
help
|
||||
This driver handles events on the power, sleep, and lid buttons.
|
||||
A daemon reads /proc/acpi/event and perform user-defined actions
|
||||
such as shutting down the system. This is necessary for
|
||||
software-controlled poweroff.
|
||||
A daemon reads events from input devices or via netlink and
|
||||
performs user-defined actions such as shutting down the system.
|
||||
This is necessary for software-controlled poweroff.
|
||||
|
||||
To compile this driver as a module, choose M here:
|
||||
the module will be called button.
|
||||
|
|
|
@ -1025,60 +1025,4 @@ void acpi_dev_pm_detach(struct device *dev, bool power_off)
|
|||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_dev_pm_detach);
|
||||
|
||||
/**
|
||||
* acpi_dev_pm_add_dependent - Add physical device depending for PM.
|
||||
* @handle: Handle of ACPI device node.
|
||||
* @depdev: Device depending on that node for PM.
|
||||
*/
|
||||
void acpi_dev_pm_add_dependent(acpi_handle handle, struct device *depdev)
|
||||
{
|
||||
struct acpi_device_physical_node *dep;
|
||||
struct acpi_device *adev;
|
||||
|
||||
if (!depdev || acpi_bus_get_device(handle, &adev))
|
||||
return;
|
||||
|
||||
mutex_lock(&adev->physical_node_lock);
|
||||
|
||||
list_for_each_entry(dep, &adev->power_dependent, node)
|
||||
if (dep->dev == depdev)
|
||||
goto out;
|
||||
|
||||
dep = kzalloc(sizeof(*dep), GFP_KERNEL);
|
||||
if (dep) {
|
||||
dep->dev = depdev;
|
||||
list_add_tail(&dep->node, &adev->power_dependent);
|
||||
}
|
||||
|
||||
out:
|
||||
mutex_unlock(&adev->physical_node_lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_dev_pm_add_dependent);
|
||||
|
||||
/**
|
||||
* acpi_dev_pm_remove_dependent - Remove physical device depending for PM.
|
||||
* @handle: Handle of ACPI device node.
|
||||
* @depdev: Device depending on that node for PM.
|
||||
*/
|
||||
void acpi_dev_pm_remove_dependent(acpi_handle handle, struct device *depdev)
|
||||
{
|
||||
struct acpi_device_physical_node *dep;
|
||||
struct acpi_device *adev;
|
||||
|
||||
if (!depdev || acpi_bus_get_device(handle, &adev))
|
||||
return;
|
||||
|
||||
mutex_lock(&adev->physical_node_lock);
|
||||
|
||||
list_for_each_entry(dep, &adev->power_dependent, node)
|
||||
if (dep->dev == depdev) {
|
||||
list_del(&dep->node);
|
||||
kfree(dep);
|
||||
break;
|
||||
}
|
||||
|
||||
mutex_unlock(&adev->physical_node_lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_dev_pm_remove_dependent);
|
||||
#endif /* CONFIG_PM */
|
||||
|
|
|
@ -59,16 +59,9 @@ ACPI_MODULE_NAME("power");
|
|||
#define ACPI_POWER_RESOURCE_STATE_ON 0x01
|
||||
#define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF
|
||||
|
||||
struct acpi_power_dependent_device {
|
||||
struct list_head node;
|
||||
struct acpi_device *adev;
|
||||
struct work_struct work;
|
||||
};
|
||||
|
||||
struct acpi_power_resource {
|
||||
struct acpi_device device;
|
||||
struct list_head list_node;
|
||||
struct list_head dependent;
|
||||
char *name;
|
||||
u32 system_level;
|
||||
u32 order;
|
||||
|
@ -233,32 +226,6 @@ static int acpi_power_get_list_state(struct list_head *list, int *state)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void acpi_power_resume_dependent(struct work_struct *work)
|
||||
{
|
||||
struct acpi_power_dependent_device *dep;
|
||||
struct acpi_device_physical_node *pn;
|
||||
struct acpi_device *adev;
|
||||
int state;
|
||||
|
||||
dep = container_of(work, struct acpi_power_dependent_device, work);
|
||||
adev = dep->adev;
|
||||
if (acpi_power_get_inferred_state(adev, &state))
|
||||
return;
|
||||
|
||||
if (state > ACPI_STATE_D0)
|
||||
return;
|
||||
|
||||
mutex_lock(&adev->physical_node_lock);
|
||||
|
||||
list_for_each_entry(pn, &adev->physical_node_list, node)
|
||||
pm_request_resume(pn->dev);
|
||||
|
||||
list_for_each_entry(pn, &adev->power_dependent, node)
|
||||
pm_request_resume(pn->dev);
|
||||
|
||||
mutex_unlock(&adev->physical_node_lock);
|
||||
}
|
||||
|
||||
static int __acpi_power_on(struct acpi_power_resource *resource)
|
||||
{
|
||||
acpi_status status = AE_OK;
|
||||
|
@ -283,14 +250,8 @@ static int acpi_power_on_unlocked(struct acpi_power_resource *resource)
|
|||
resource->name));
|
||||
} else {
|
||||
result = __acpi_power_on(resource);
|
||||
if (result) {
|
||||
if (result)
|
||||
resource->ref_count--;
|
||||
} else {
|
||||
struct acpi_power_dependent_device *dep;
|
||||
|
||||
list_for_each_entry(dep, &resource->dependent, node)
|
||||
schedule_work(&dep->work);
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
@ -390,52 +351,6 @@ static int acpi_power_on_list(struct list_head *list)
|
|||
return result;
|
||||
}
|
||||
|
||||
static void acpi_power_add_dependent(struct acpi_power_resource *resource,
|
||||
struct acpi_device *adev)
|
||||
{
|
||||
struct acpi_power_dependent_device *dep;
|
||||
|
||||
mutex_lock(&resource->resource_lock);
|
||||
|
||||
list_for_each_entry(dep, &resource->dependent, node)
|
||||
if (dep->adev == adev)
|
||||
goto out;
|
||||
|
||||
dep = kzalloc(sizeof(*dep), GFP_KERNEL);
|
||||
if (!dep)
|
||||
goto out;
|
||||
|
||||
dep->adev = adev;
|
||||
INIT_WORK(&dep->work, acpi_power_resume_dependent);
|
||||
list_add_tail(&dep->node, &resource->dependent);
|
||||
|
||||
out:
|
||||
mutex_unlock(&resource->resource_lock);
|
||||
}
|
||||
|
||||
static void acpi_power_remove_dependent(struct acpi_power_resource *resource,
|
||||
struct acpi_device *adev)
|
||||
{
|
||||
struct acpi_power_dependent_device *dep;
|
||||
struct work_struct *work = NULL;
|
||||
|
||||
mutex_lock(&resource->resource_lock);
|
||||
|
||||
list_for_each_entry(dep, &resource->dependent, node)
|
||||
if (dep->adev == adev) {
|
||||
list_del(&dep->node);
|
||||
work = &dep->work;
|
||||
break;
|
||||
}
|
||||
|
||||
mutex_unlock(&resource->resource_lock);
|
||||
|
||||
if (work) {
|
||||
cancel_work_sync(work);
|
||||
kfree(dep);
|
||||
}
|
||||
}
|
||||
|
||||
static struct attribute *attrs[] = {
|
||||
NULL,
|
||||
};
|
||||
|
@ -524,8 +439,6 @@ static void acpi_power_expose_hide(struct acpi_device *adev,
|
|||
|
||||
void acpi_power_add_remove_device(struct acpi_device *adev, bool add)
|
||||
{
|
||||
struct acpi_device_power_state *ps;
|
||||
struct acpi_power_resource_entry *entry;
|
||||
int state;
|
||||
|
||||
if (adev->wakeup.flags.valid)
|
||||
|
@ -535,16 +448,6 @@ void acpi_power_add_remove_device(struct acpi_device *adev, bool add)
|
|||
if (!adev->power.flags.power_resources)
|
||||
return;
|
||||
|
||||
ps = &adev->power.states[ACPI_STATE_D0];
|
||||
list_for_each_entry(entry, &ps->resources, node) {
|
||||
struct acpi_power_resource *resource = entry->resource;
|
||||
|
||||
if (add)
|
||||
acpi_power_add_dependent(resource, adev);
|
||||
else
|
||||
acpi_power_remove_dependent(resource, adev);
|
||||
}
|
||||
|
||||
for (state = ACPI_STATE_D0; state <= ACPI_STATE_D3_HOT; state++)
|
||||
acpi_power_expose_hide(adev,
|
||||
&adev->power.states[state].resources,
|
||||
|
@ -882,7 +785,6 @@ int acpi_add_power_resource(acpi_handle handle)
|
|||
acpi_init_device_object(device, handle, ACPI_BUS_TYPE_POWER,
|
||||
ACPI_STA_DEFAULT);
|
||||
mutex_init(&resource->resource_lock);
|
||||
INIT_LIST_HEAD(&resource->dependent);
|
||||
INIT_LIST_HEAD(&resource->list_node);
|
||||
resource->name = device->pnp.bus_id;
|
||||
strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
|
||||
|
@ -936,8 +838,10 @@ void acpi_resume_power_resources(void)
|
|||
mutex_lock(&resource->resource_lock);
|
||||
|
||||
result = acpi_power_get_state(resource->device.handle, &state);
|
||||
if (result)
|
||||
if (result) {
|
||||
mutex_unlock(&resource->resource_lock);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (state == ACPI_POWER_RESOURCE_STATE_OFF
|
||||
&& resource->ref_count) {
|
||||
|
|
|
@ -999,7 +999,6 @@ int acpi_device_add(struct acpi_device *device,
|
|||
INIT_LIST_HEAD(&device->wakeup_list);
|
||||
INIT_LIST_HEAD(&device->physical_node_list);
|
||||
mutex_init(&device->physical_node_lock);
|
||||
INIT_LIST_HEAD(&device->power_dependent);
|
||||
|
||||
new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL);
|
||||
if (!new_bus_id) {
|
||||
|
|
|
@ -1343,7 +1343,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
|
||||
host->flags |= ATA_HOST_PARALLEL_SCAN;
|
||||
else
|
||||
printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
|
||||
dev_info(&pdev->dev, "SSS flag set, parallel bus scan disabled\n");
|
||||
|
||||
if (pi.flags & ATA_FLAG_EM)
|
||||
ahci_reset_em(host);
|
||||
|
|
|
@ -184,7 +184,7 @@ static int ahci_probe(struct platform_device *pdev)
|
|||
if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
|
||||
host->flags |= ATA_HOST_PARALLEL_SCAN;
|
||||
else
|
||||
printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
|
||||
dev_info(dev, "SSS flag set, parallel bus scan disabled\n");
|
||||
|
||||
if (pi.flags & ATA_FLAG_EM)
|
||||
ahci_reset_em(host);
|
||||
|
|
|
@ -778,8 +778,16 @@ static void ahci_start_port(struct ata_port *ap)
|
|||
rc = ap->ops->transmit_led_message(ap,
|
||||
emp->led_state,
|
||||
4);
|
||||
/*
|
||||
* If busy, give a breather but do not
|
||||
* release EH ownership by using msleep()
|
||||
* instead of ata_msleep(). EM Transmit
|
||||
* bit is busy for the whole host and
|
||||
* releasing ownership will cause other
|
||||
* ports to fail the same way.
|
||||
*/
|
||||
if (rc == -EBUSY)
|
||||
ata_msleep(ap, 1);
|
||||
msleep(1);
|
||||
else
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -1035,17 +1035,3 @@ void ata_acpi_on_disable(struct ata_device *dev)
|
|||
{
|
||||
ata_acpi_clear_gtf(dev);
|
||||
}
|
||||
|
||||
void ata_scsi_acpi_bind(struct ata_device *dev)
|
||||
{
|
||||
acpi_handle handle = ata_dev_acpi_handle(dev);
|
||||
if (handle)
|
||||
acpi_dev_pm_add_dependent(handle, &dev->sdev->sdev_gendev);
|
||||
}
|
||||
|
||||
void ata_scsi_acpi_unbind(struct ata_device *dev)
|
||||
{
|
||||
acpi_handle handle = ata_dev_acpi_handle(dev);
|
||||
if (handle)
|
||||
acpi_dev_pm_remove_dependent(handle, &dev->sdev->sdev_gendev);
|
||||
}
|
||||
|
|
|
@ -1322,14 +1322,14 @@ void ata_eh_qc_complete(struct ata_queued_cmd *qc)
|
|||
* should be retried. To be used from EH.
|
||||
*
|
||||
* SCSI midlayer limits the number of retries to scmd->allowed.
|
||||
* scmd->retries is decremented for commands which get retried
|
||||
* scmd->allowed is incremented for commands which get retried
|
||||
* due to unrelated failures (qc->err_mask is zero).
|
||||
*/
|
||||
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
|
||||
{
|
||||
struct scsi_cmnd *scmd = qc->scsicmd;
|
||||
if (!qc->err_mask && scmd->retries)
|
||||
scmd->retries--;
|
||||
if (!qc->err_mask)
|
||||
scmd->allowed++;
|
||||
__ata_eh_qc_complete(qc);
|
||||
}
|
||||
|
||||
|
|
|
@ -3679,7 +3679,6 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
|
|||
if (!IS_ERR(sdev)) {
|
||||
dev->sdev = sdev;
|
||||
scsi_device_put(sdev);
|
||||
ata_scsi_acpi_bind(dev);
|
||||
} else {
|
||||
dev->sdev = NULL;
|
||||
}
|
||||
|
@ -3767,8 +3766,6 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
|
|||
struct scsi_device *sdev;
|
||||
unsigned long flags;
|
||||
|
||||
ata_scsi_acpi_unbind(dev);
|
||||
|
||||
/* Alas, we need to grab scan_mutex to ensure SCSI device
|
||||
* state doesn't change underneath us and thus
|
||||
* scsi_device_get() always succeeds. The mutex locking can
|
||||
|
|
|
@ -121,8 +121,6 @@ extern void ata_acpi_set_state(struct ata_port *ap, pm_message_t state);
|
|||
extern void ata_acpi_bind_port(struct ata_port *ap);
|
||||
extern void ata_acpi_bind_dev(struct ata_device *dev);
|
||||
extern acpi_handle ata_dev_acpi_handle(struct ata_device *dev);
|
||||
extern void ata_scsi_acpi_bind(struct ata_device *dev);
|
||||
extern void ata_scsi_acpi_unbind(struct ata_device *dev);
|
||||
#else
|
||||
static inline void ata_acpi_dissociate(struct ata_host *host) { }
|
||||
static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; }
|
||||
|
@ -133,8 +131,6 @@ static inline void ata_acpi_set_state(struct ata_port *ap,
|
|||
pm_message_t state) { }
|
||||
static inline void ata_acpi_bind_port(struct ata_port *ap) {}
|
||||
static inline void ata_acpi_bind_dev(struct ata_device *dev) {}
|
||||
static inline void ata_scsi_acpi_bind(struct ata_device *dev) {}
|
||||
static inline void ata_scsi_acpi_unbind(struct ata_device *dev) {}
|
||||
#endif
|
||||
|
||||
/* libata-scsi.c */
|
||||
|
|
|
@ -78,7 +78,7 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev
|
|||
|
||||
ap->ioaddr.cmd_addr = cmd_addr;
|
||||
|
||||
if (pnp_port_valid(idev, 1) == 0) {
|
||||
if (pnp_port_valid(idev, 1)) {
|
||||
ctl_addr = devm_ioport_map(&idev->dev,
|
||||
pnp_port_start(idev, 1), 1);
|
||||
ap->ioaddr.altstatus_addr = ctl_addr;
|
||||
|
|
|
@ -333,8 +333,10 @@ store_mem_state(struct device *dev,
|
|||
online_type = ONLINE_KEEP;
|
||||
else if (!strncmp(buf, "offline", min_t(int, count, 7)))
|
||||
online_type = -1;
|
||||
else
|
||||
return -EINVAL;
|
||||
else {
|
||||
ret = -EINVAL;
|
||||
goto err;
|
||||
}
|
||||
|
||||
switch (online_type) {
|
||||
case ONLINE_KERNEL:
|
||||
|
@ -357,6 +359,7 @@ store_mem_state(struct device *dev,
|
|||
ret = -EINVAL; /* should never happen */
|
||||
}
|
||||
|
||||
err:
|
||||
unlock_device_hotplug();
|
||||
|
||||
if (ret)
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
#include <linux/errno.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <xen/xen.h>
|
||||
#include <xen/events.h>
|
||||
#include <xen/interface/io/tpmif.h>
|
||||
#include <xen/grant_table.h>
|
||||
|
|
|
@ -65,6 +65,7 @@ void proc_fork_connector(struct task_struct *task)
|
|||
|
||||
msg = (struct cn_msg *)buffer;
|
||||
ev = (struct proc_event *)msg->data;
|
||||
memset(&ev->event_data, 0, sizeof(ev->event_data));
|
||||
get_seq(&msg->seq, &ev->cpu);
|
||||
ktime_get_ts(&ts); /* get high res monotonic timestamp */
|
||||
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
|
||||
|
@ -80,6 +81,7 @@ void proc_fork_connector(struct task_struct *task)
|
|||
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
|
||||
msg->ack = 0; /* not used */
|
||||
msg->len = sizeof(*ev);
|
||||
msg->flags = 0; /* not used */
|
||||
/* If cn_netlink_send() failed, the data is not sent */
|
||||
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
|
||||
}
|
||||
|
@ -96,6 +98,7 @@ void proc_exec_connector(struct task_struct *task)
|
|||
|
||||
msg = (struct cn_msg *)buffer;
|
||||
ev = (struct proc_event *)msg->data;
|
||||
memset(&ev->event_data, 0, sizeof(ev->event_data));
|
||||
get_seq(&msg->seq, &ev->cpu);
|
||||
ktime_get_ts(&ts); /* get high res monotonic timestamp */
|
||||
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
|
||||
|
@ -106,6 +109,7 @@ void proc_exec_connector(struct task_struct *task)
|
|||
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
|
||||
msg->ack = 0; /* not used */
|
||||
msg->len = sizeof(*ev);
|
||||
msg->flags = 0; /* not used */
|
||||
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
|
||||
}
|
||||
|
||||
|
@ -122,6 +126,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
|
|||
|
||||
msg = (struct cn_msg *)buffer;
|
||||
ev = (struct proc_event *)msg->data;
|
||||
memset(&ev->event_data, 0, sizeof(ev->event_data));
|
||||
ev->what = which_id;
|
||||
ev->event_data.id.process_pid = task->pid;
|
||||
ev->event_data.id.process_tgid = task->tgid;
|
||||
|
@ -145,6 +150,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
|
|||
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
|
||||
msg->ack = 0; /* not used */
|
||||
msg->len = sizeof(*ev);
|
||||
msg->flags = 0; /* not used */
|
||||
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
|
||||
}
|
||||
|
||||
|
@ -160,6 +166,7 @@ void proc_sid_connector(struct task_struct *task)
|
|||
|
||||
msg = (struct cn_msg *)buffer;
|
||||
ev = (struct proc_event *)msg->data;
|
||||
memset(&ev->event_data, 0, sizeof(ev->event_data));
|
||||
get_seq(&msg->seq, &ev->cpu);
|
||||
ktime_get_ts(&ts); /* get high res monotonic timestamp */
|
||||
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
|
||||
|
@ -170,6 +177,7 @@ void proc_sid_connector(struct task_struct *task)
|
|||
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
|
||||
msg->ack = 0; /* not used */
|
||||
msg->len = sizeof(*ev);
|
||||
msg->flags = 0; /* not used */
|
||||
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
|
||||
}
|
||||
|
||||
|
@ -185,6 +193,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
|
|||
|
||||
msg = (struct cn_msg *)buffer;
|
||||
ev = (struct proc_event *)msg->data;
|
||||
memset(&ev->event_data, 0, sizeof(ev->event_data));
|
||||
get_seq(&msg->seq, &ev->cpu);
|
||||
ktime_get_ts(&ts); /* get high res monotonic timestamp */
|
||||
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
|
||||
|
@ -203,6 +212,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
|
|||
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
|
||||
msg->ack = 0; /* not used */
|
||||
msg->len = sizeof(*ev);
|
||||
msg->flags = 0; /* not used */
|
||||
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
|
||||
}
|
||||
|
||||
|
@ -218,6 +228,7 @@ void proc_comm_connector(struct task_struct *task)
|
|||
|
||||
msg = (struct cn_msg *)buffer;
|
||||
ev = (struct proc_event *)msg->data;
|
||||
memset(&ev->event_data, 0, sizeof(ev->event_data));
|
||||
get_seq(&msg->seq, &ev->cpu);
|
||||
ktime_get_ts(&ts); /* get high res monotonic timestamp */
|
||||
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
|
||||
|
@ -229,6 +240,7 @@ void proc_comm_connector(struct task_struct *task)
|
|||
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
|
||||
msg->ack = 0; /* not used */
|
||||
msg->len = sizeof(*ev);
|
||||
msg->flags = 0; /* not used */
|
||||
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
|
||||
}
|
||||
|
||||
|
@ -244,6 +256,7 @@ void proc_coredump_connector(struct task_struct *task)
|
|||
|
||||
msg = (struct cn_msg *)buffer;
|
||||
ev = (struct proc_event *)msg->data;
|
||||
memset(&ev->event_data, 0, sizeof(ev->event_data));
|
||||
get_seq(&msg->seq, &ev->cpu);
|
||||
ktime_get_ts(&ts); /* get high res monotonic timestamp */
|
||||
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
|
||||
|
@ -254,6 +267,7 @@ void proc_coredump_connector(struct task_struct *task)
|
|||
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
|
||||
msg->ack = 0; /* not used */
|
||||
msg->len = sizeof(*ev);
|
||||
msg->flags = 0; /* not used */
|
||||
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
|
||||
}
|
||||
|
||||
|
@ -269,6 +283,7 @@ void proc_exit_connector(struct task_struct *task)
|
|||
|
||||
msg = (struct cn_msg *)buffer;
|
||||
ev = (struct proc_event *)msg->data;
|
||||
memset(&ev->event_data, 0, sizeof(ev->event_data));
|
||||
get_seq(&msg->seq, &ev->cpu);
|
||||
ktime_get_ts(&ts); /* get high res monotonic timestamp */
|
||||
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
|
||||
|
@ -281,6 +296,7 @@ void proc_exit_connector(struct task_struct *task)
|
|||
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
|
||||
msg->ack = 0; /* not used */
|
||||
msg->len = sizeof(*ev);
|
||||
msg->flags = 0; /* not used */
|
||||
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
|
||||
}
|
||||
|
||||
|
@ -304,6 +320,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
|
|||
|
||||
msg = (struct cn_msg *)buffer;
|
||||
ev = (struct proc_event *)msg->data;
|
||||
memset(&ev->event_data, 0, sizeof(ev->event_data));
|
||||
msg->seq = rcvd_seq;
|
||||
ktime_get_ts(&ts); /* get high res monotonic timestamp */
|
||||
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
|
||||
|
@ -313,6 +330,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
|
|||
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
|
||||
msg->ack = rcvd_ack + 1;
|
||||
msg->len = sizeof(*ev);
|
||||
msg->flags = 0; /* not used */
|
||||
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
|
||||
}
|
||||
|
||||
|
|
|
@ -109,7 +109,7 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
|
|||
|
||||
data = nlmsg_data(nlh);
|
||||
|
||||
memcpy(data, msg, sizeof(*data) + msg->len);
|
||||
memcpy(data, msg, size);
|
||||
|
||||
NETLINK_CB(skb).dst_group = group;
|
||||
|
||||
|
@ -157,17 +157,18 @@ static int cn_call_callback(struct sk_buff *skb)
|
|||
static void cn_rx_skb(struct sk_buff *__skb)
|
||||
{
|
||||
struct nlmsghdr *nlh;
|
||||
int err;
|
||||
struct sk_buff *skb;
|
||||
int len, err;
|
||||
|
||||
skb = skb_get(__skb);
|
||||
|
||||
if (skb->len >= NLMSG_HDRLEN) {
|
||||
nlh = nlmsg_hdr(skb);
|
||||
len = nlmsg_len(nlh);
|
||||
|
||||
if (nlh->nlmsg_len < sizeof(struct cn_msg) ||
|
||||
if (len < (int)sizeof(struct cn_msg) ||
|
||||
skb->len < nlh->nlmsg_len ||
|
||||
nlh->nlmsg_len > CONNECTOR_MAX_MSG_SIZE) {
|
||||
len > CONNECTOR_MAX_MSG_SIZE) {
|
||||
kfree_skb(skb);
|
||||
return;
|
||||
}
|
||||
|
|
|
@ -986,12 +986,12 @@ static int __init acpi_cpufreq_init(void)
|
|||
{
|
||||
int ret;
|
||||
|
||||
if (acpi_disabled)
|
||||
return -ENODEV;
|
||||
|
||||
/* don't keep reloading if cpufreq_driver exists */
|
||||
if (cpufreq_get_current_driver())
|
||||
return 0;
|
||||
|
||||
if (acpi_disabled)
|
||||
return 0;
|
||||
return -EEXIST;
|
||||
|
||||
pr_debug("acpi_cpufreq_init\n");
|
||||
|
||||
|
|
|
@ -48,7 +48,7 @@ static inline int32_t div_fp(int32_t x, int32_t y)
|
|||
}
|
||||
|
||||
struct sample {
|
||||
int core_pct_busy;
|
||||
int32_t core_pct_busy;
|
||||
u64 aperf;
|
||||
u64 mperf;
|
||||
int freq;
|
||||
|
@ -68,7 +68,7 @@ struct _pid {
|
|||
int32_t i_gain;
|
||||
int32_t d_gain;
|
||||
int deadband;
|
||||
int last_err;
|
||||
int32_t last_err;
|
||||
};
|
||||
|
||||
struct cpudata {
|
||||
|
@ -153,16 +153,15 @@ static inline void pid_d_gain_set(struct _pid *pid, int percent)
|
|||
pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
|
||||
}
|
||||
|
||||
static signed int pid_calc(struct _pid *pid, int busy)
|
||||
static signed int pid_calc(struct _pid *pid, int32_t busy)
|
||||
{
|
||||
signed int err, result;
|
||||
signed int result;
|
||||
int32_t pterm, dterm, fp_error;
|
||||
int32_t integral_limit;
|
||||
|
||||
err = pid->setpoint - busy;
|
||||
fp_error = int_tofp(err);
|
||||
fp_error = int_tofp(pid->setpoint) - busy;
|
||||
|
||||
if (abs(err) <= pid->deadband)
|
||||
if (abs(fp_error) <= int_tofp(pid->deadband))
|
||||
return 0;
|
||||
|
||||
pterm = mul_fp(pid->p_gain, fp_error);
|
||||
|
@ -176,8 +175,8 @@ static signed int pid_calc(struct _pid *pid, int busy)
|
|||
if (pid->integral < -integral_limit)
|
||||
pid->integral = -integral_limit;
|
||||
|
||||
dterm = mul_fp(pid->d_gain, (err - pid->last_err));
|
||||
pid->last_err = err;
|
||||
dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
|
||||
pid->last_err = fp_error;
|
||||
|
||||
result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
|
||||
|
||||
|
@ -367,12 +366,13 @@ static int intel_pstate_turbo_pstate(void)
|
|||
static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
|
||||
{
|
||||
int max_perf = cpu->pstate.turbo_pstate;
|
||||
int max_perf_adj;
|
||||
int min_perf;
|
||||
if (limits.no_turbo)
|
||||
max_perf = cpu->pstate.max_pstate;
|
||||
|
||||
max_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
|
||||
*max = clamp_t(int, max_perf,
|
||||
max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
|
||||
*max = clamp_t(int, max_perf_adj,
|
||||
cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
|
||||
|
||||
min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
|
||||
|
@ -383,6 +383,7 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
|
|||
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
|
||||
{
|
||||
int max_perf, min_perf;
|
||||
u64 val;
|
||||
|
||||
intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
|
||||
|
||||
|
@ -394,11 +395,11 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
|
|||
trace_cpu_frequency(pstate * 100000, cpu->cpu);
|
||||
|
||||
cpu->pstate.current_pstate = pstate;
|
||||
val = pstate << 8;
|
||||
if (limits.no_turbo)
|
||||
wrmsrl(MSR_IA32_PERF_CTL, BIT(32) | (pstate << 8));
|
||||
else
|
||||
wrmsrl(MSR_IA32_PERF_CTL, pstate << 8);
|
||||
val |= (u64)1 << 32;
|
||||
|
||||
wrmsrl(MSR_IA32_PERF_CTL, val);
|
||||
}
|
||||
|
||||
static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
|
||||
|
@ -435,8 +436,9 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
|
|||
struct sample *sample)
|
||||
{
|
||||
u64 core_pct;
|
||||
core_pct = div64_u64(sample->aperf * 100, sample->mperf);
|
||||
sample->freq = cpu->pstate.max_pstate * core_pct * 1000;
|
||||
core_pct = div64_u64(int_tofp(sample->aperf * 100),
|
||||
sample->mperf);
|
||||
sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000);
|
||||
|
||||
sample->core_pct_busy = core_pct;
|
||||
}
|
||||
|
@ -468,22 +470,19 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
|
|||
mod_timer_pinned(&cpu->timer, jiffies + delay);
|
||||
}
|
||||
|
||||
static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
|
||||
static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
|
||||
{
|
||||
int32_t busy_scaled;
|
||||
int32_t core_busy, max_pstate, current_pstate;
|
||||
|
||||
core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy);
|
||||
core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy;
|
||||
max_pstate = int_tofp(cpu->pstate.max_pstate);
|
||||
current_pstate = int_tofp(cpu->pstate.current_pstate);
|
||||
busy_scaled = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
|
||||
|
||||
return fp_toint(busy_scaled);
|
||||
return mul_fp(core_busy, div_fp(max_pstate, current_pstate));
|
||||
}
|
||||
|
||||
static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
|
||||
{
|
||||
int busy_scaled;
|
||||
int32_t busy_scaled;
|
||||
struct _pid *pid;
|
||||
signed int ctl = 0;
|
||||
int steps;
|
||||
|
@ -637,8 +636,8 @@ static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
|
|||
|
||||
static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
|
||||
{
|
||||
int rc, min_pstate, max_pstate;
|
||||
struct cpudata *cpu;
|
||||
int rc;
|
||||
|
||||
rc = intel_pstate_init_cpu(policy->cpu);
|
||||
if (rc)
|
||||
|
@ -652,9 +651,8 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
|
|||
else
|
||||
policy->policy = CPUFREQ_POLICY_POWERSAVE;
|
||||
|
||||
intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
|
||||
policy->min = min_pstate * 100000;
|
||||
policy->max = max_pstate * 100000;
|
||||
policy->min = cpu->pstate.min_pstate * 100000;
|
||||
policy->max = cpu->pstate.turbo_pstate * 100000;
|
||||
|
||||
/* cpuinfo and default policy values */
|
||||
policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
|
||||
|
|
|
@ -166,7 +166,7 @@ static void __init s3c64xx_cpufreq_config_regulator(void)
|
|||
if (freq->frequency == CPUFREQ_ENTRY_INVALID)
|
||||
continue;
|
||||
|
||||
dvfs = &s3c64xx_dvfs_table[freq->index];
|
||||
dvfs = &s3c64xx_dvfs_table[freq->driver_data];
|
||||
found = 0;
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
|
|
|
@ -305,7 +305,9 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
|
|||
edma_alloc_slot(EDMA_CTLR(echan->ch_num),
|
||||
EDMA_SLOT_ANY);
|
||||
if (echan->slot[i] < 0) {
|
||||
kfree(edesc);
|
||||
dev_err(dev, "Failed to allocate slot\n");
|
||||
kfree(edesc);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
@ -345,6 +347,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
|
|||
ccnt = sg_dma_len(sg) / (acnt * bcnt);
|
||||
if (ccnt > (SZ_64K - 1)) {
|
||||
dev_err(dev, "Exceeded max SG segment size\n");
|
||||
kfree(edesc);
|
||||
return NULL;
|
||||
}
|
||||
cidx = acnt * bcnt;
|
||||
|
|
|
@ -93,6 +93,7 @@ struct hpb_dmae_chan {
|
|||
void __iomem *base;
|
||||
const struct hpb_dmae_slave_config *cfg;
|
||||
char dev_id[16]; /* unique name per DMAC of channel */
|
||||
dma_addr_t slave_addr;
|
||||
};
|
||||
|
||||
struct hpb_dmae_device {
|
||||
|
@ -432,7 +433,6 @@ hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
|
|||
hpb_chan->xfer_mode = XFER_DOUBLE;
|
||||
} else {
|
||||
dev_err(hpb_chan->shdma_chan.dev, "DCR setting error");
|
||||
shdma_free_irq(&hpb_chan->shdma_chan);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -446,7 +446,8 @@ hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id, bool try)
|
||||
static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id,
|
||||
dma_addr_t slave_addr, bool try)
|
||||
{
|
||||
struct hpb_dmae_chan *chan = to_chan(schan);
|
||||
const struct hpb_dmae_slave_config *sc =
|
||||
|
@ -457,6 +458,7 @@ static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id, bool try)
|
|||
if (try)
|
||||
return 0;
|
||||
chan->cfg = sc;
|
||||
chan->slave_addr = slave_addr ? : sc->addr;
|
||||
return hpb_dmae_alloc_chan_resources(chan, sc);
|
||||
}
|
||||
|
||||
|
@ -468,7 +470,7 @@ static dma_addr_t hpb_dmae_slave_addr(struct shdma_chan *schan)
|
|||
{
|
||||
struct hpb_dmae_chan *chan = to_chan(schan);
|
||||
|
||||
return chan->cfg->addr;
|
||||
return chan->slave_addr;
|
||||
}
|
||||
|
||||
static struct shdma_desc *hpb_dmae_embedded_desc(void *buf, int i)
|
||||
|
@ -614,7 +616,6 @@ static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev)
|
|||
shdma_for_each_chan(schan, &hpbdev->shdma_dev, i) {
|
||||
BUG_ON(!schan);
|
||||
|
||||
shdma_free_irq(schan);
|
||||
shdma_chan_remove(schan);
|
||||
}
|
||||
dma_dev->chancnt = 0;
|
||||
|
|
|
@ -248,14 +248,15 @@ static void lp_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
|
|||
struct lp_gpio *lg = irq_data_get_irq_handler_data(data);
|
||||
struct irq_chip *chip = irq_data_get_irq_chip(data);
|
||||
u32 base, pin, mask;
|
||||
unsigned long reg, pending;
|
||||
unsigned long reg, ena, pending;
|
||||
unsigned virq;
|
||||
|
||||
/* check from GPIO controller which pin triggered the interrupt */
|
||||
for (base = 0; base < lg->chip.ngpio; base += 32) {
|
||||
reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
|
||||
ena = lp_gpio_reg(&lg->chip, base, LP_INT_ENABLE);
|
||||
|
||||
while ((pending = inl(reg))) {
|
||||
while ((pending = (inl(reg) & inl(ena)))) {
|
||||
pin = __ffs(pending);
|
||||
mask = BIT(pin);
|
||||
/* Clear before handling so we don't lose an edge */
|
||||
|
|
|
@ -136,7 +136,7 @@ static struct gpio_desc *gpio_to_desc(unsigned gpio)
|
|||
*/
|
||||
static int desc_to_gpio(const struct gpio_desc *desc)
|
||||
{
|
||||
return desc->chip->base + gpio_chip_hwgpio(desc);
|
||||
return desc - &gpio_desc[0];
|
||||
}
|
||||
|
||||
|
||||
|
@ -1398,7 +1398,7 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
|
|||
int status = -EPROBE_DEFER;
|
||||
unsigned long flags;
|
||||
|
||||
if (!desc || !desc->chip) {
|
||||
if (!desc) {
|
||||
pr_warn("%s: invalid GPIO\n", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -1406,6 +1406,8 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
|
|||
spin_lock_irqsave(&gpio_lock, flags);
|
||||
|
||||
chip = desc->chip;
|
||||
if (chip == NULL)
|
||||
goto done;
|
||||
|
||||
if (!try_module_get(chip->owner))
|
||||
goto done;
|
||||
|
|
|
@ -61,7 +61,7 @@ static int drm_version(struct drm_device *dev, void *data,
|
|||
|
||||
/** Ioctl table */
|
||||
static const struct drm_ioctl_desc drm_ioctls[] = {
|
||||
DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED),
|
||||
DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW),
|
||||
DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
|
||||
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
|
||||
DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
|
||||
|
@ -402,9 +402,16 @@ long drm_ioctl(struct file *filp,
|
|||
cmd = ioctl->cmd_drv;
|
||||
}
|
||||
else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
|
||||
u32 drv_size;
|
||||
|
||||
ioctl = &drm_ioctls[nr];
|
||||
cmd = ioctl->cmd;
|
||||
|
||||
drv_size = _IOC_SIZE(ioctl->cmd);
|
||||
usize = asize = _IOC_SIZE(cmd);
|
||||
if (drv_size > asize)
|
||||
asize = drv_size;
|
||||
|
||||
cmd = ioctl->cmd;
|
||||
} else
|
||||
goto err_i1;
|
||||
|
||||
|
|
|
@ -2925,6 +2925,8 @@ int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
|
|||
/* Speaker Allocation Data Block */
|
||||
if (dbl == 3) {
|
||||
*sadb = kmalloc(dbl, GFP_KERNEL);
|
||||
if (!*sadb)
|
||||
return -ENOMEM;
|
||||
memcpy(*sadb, &db[1], dbl);
|
||||
count = dbl;
|
||||
break;
|
||||
|
|
|
@ -407,14 +407,6 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
|
|||
struct drm_connector *connector;
|
||||
int i, j;
|
||||
|
||||
/*
|
||||
* fbdev->blank can be called from irq context in case of a panic.
|
||||
* Since we already have our own special panic handler which will
|
||||
* restore the fbdev console mode completely, just bail out early.
|
||||
*/
|
||||
if (oops_in_progress)
|
||||
return;
|
||||
|
||||
/*
|
||||
* fbdev->blank can be called from irq context in case of a panic.
|
||||
* Since we already have our own special panic handler which will
|
||||
|
|
|
@ -204,6 +204,7 @@ static int psb_gtt_attach_pages(struct gtt_range *gt)
|
|||
if (IS_ERR(pages))
|
||||
return PTR_ERR(pages);
|
||||
|
||||
gt->npage = gt->gem.size / PAGE_SIZE;
|
||||
gt->pages = pages;
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -1290,12 +1290,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
|
|||
* then we do not take part in VGA arbitration and the
|
||||
* vga_client_register() fails with -ENODEV.
|
||||
*/
|
||||
if (!HAS_PCH_SPLIT(dev)) {
|
||||
ret = vga_client_register(dev->pdev, dev, NULL,
|
||||
i915_vga_set_decode);
|
||||
if (ret && ret != -ENODEV)
|
||||
goto out;
|
||||
}
|
||||
ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
|
||||
if (ret && ret != -ENODEV)
|
||||
goto out;
|
||||
|
||||
intel_register_dsm_handler();
|
||||
|
||||
|
@ -1351,12 +1348,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
|
|||
*/
|
||||
intel_fbdev_initial_config(dev);
|
||||
|
||||
/*
|
||||
* Must do this after fbcon init so that
|
||||
* vgacon_save_screen() works during the handover.
|
||||
*/
|
||||
i915_disable_vga_mem(dev);
|
||||
|
||||
/* Only enable hotplug handling once the fbdev is fully set up. */
|
||||
dev_priv->enable_hotplug_processing = true;
|
||||
|
||||
|
|
|
@ -505,6 +505,8 @@ static int i915_drm_freeze(struct drm_device *dev)
|
|||
intel_modeset_suspend_hw(dev);
|
||||
}
|
||||
|
||||
i915_gem_suspend_gtt_mappings(dev);
|
||||
|
||||
i915_save_state(dev);
|
||||
|
||||
intel_opregion_fini(dev);
|
||||
|
@ -648,7 +650,8 @@ static int i915_drm_thaw(struct drm_device *dev)
|
|||
mutex_lock(&dev->struct_mutex);
|
||||
i915_gem_restore_gtt_mappings(dev);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
}
|
||||
} else if (drm_core_check_feature(dev, DRIVER_MODESET))
|
||||
i915_check_and_clear_faults(dev);
|
||||
|
||||
__i915_drm_thaw(dev);
|
||||
|
||||
|
|
|
@ -497,10 +497,12 @@ struct i915_address_space {
|
|||
|
||||
/* FIXME: Need a more generic return type */
|
||||
gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
|
||||
enum i915_cache_level level);
|
||||
enum i915_cache_level level,
|
||||
bool valid); /* Create a valid PTE */
|
||||
void (*clear_range)(struct i915_address_space *vm,
|
||||
unsigned int first_entry,
|
||||
unsigned int num_entries);
|
||||
unsigned int num_entries,
|
||||
bool use_scratch);
|
||||
void (*insert_entries)(struct i915_address_space *vm,
|
||||
struct sg_table *st,
|
||||
unsigned int first_entry,
|
||||
|
@ -2065,6 +2067,8 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
|
|||
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
|
||||
struct drm_i915_gem_object *obj);
|
||||
|
||||
void i915_check_and_clear_faults(struct drm_device *dev);
|
||||
void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
|
||||
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
|
||||
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
|
||||
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
|
||||
|
|
|
@ -58,9 +58,10 @@
|
|||
#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
|
||||
|
||||
static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
|
||||
enum i915_cache_level level)
|
||||
enum i915_cache_level level,
|
||||
bool valid)
|
||||
{
|
||||
gen6_gtt_pte_t pte = GEN6_PTE_VALID;
|
||||
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
|
||||
pte |= GEN6_PTE_ADDR_ENCODE(addr);
|
||||
|
||||
switch (level) {
|
||||
|
@ -79,9 +80,10 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
|
|||
}
|
||||
|
||||
static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
|
||||
enum i915_cache_level level)
|
||||
enum i915_cache_level level,
|
||||
bool valid)
|
||||
{
|
||||
gen6_gtt_pte_t pte = GEN6_PTE_VALID;
|
||||
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
|
||||
pte |= GEN6_PTE_ADDR_ENCODE(addr);
|
||||
|
||||
switch (level) {
|
||||
|
@ -105,9 +107,10 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
|
|||
#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
|
||||
|
||||
static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
|
||||
enum i915_cache_level level)
|
||||
enum i915_cache_level level,
|
||||
bool valid)
|
||||
{
|
||||
gen6_gtt_pte_t pte = GEN6_PTE_VALID;
|
||||
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
|
||||
pte |= GEN6_PTE_ADDR_ENCODE(addr);
|
||||
|
||||
/* Mark the page as writeable. Other platforms don't have a
|
||||
|
@ -122,9 +125,10 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
|
|||
}
|
||||
|
||||
static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
|
||||
enum i915_cache_level level)
|
||||
enum i915_cache_level level,
|
||||
bool valid)
|
||||
{
|
||||
gen6_gtt_pte_t pte = GEN6_PTE_VALID;
|
||||
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
|
||||
pte |= HSW_PTE_ADDR_ENCODE(addr);
|
||||
|
||||
if (level != I915_CACHE_NONE)
|
||||
|
@ -134,9 +138,10 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
|
|||
}
|
||||
|
||||
static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
|
||||
enum i915_cache_level level)
|
||||
enum i915_cache_level level,
|
||||
bool valid)
|
||||
{
|
||||
gen6_gtt_pte_t pte = GEN6_PTE_VALID;
|
||||
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
|
||||
pte |= HSW_PTE_ADDR_ENCODE(addr);
|
||||
|
||||
switch (level) {
|
||||
|
@ -236,7 +241,8 @@ static int gen6_ppgtt_enable(struct drm_device *dev)
|
|||
/* PPGTT support for Sandybdrige/Gen6 and later */
|
||||
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
|
||||
unsigned first_entry,
|
||||
unsigned num_entries)
|
||||
unsigned num_entries,
|
||||
bool use_scratch)
|
||||
{
|
||||
struct i915_hw_ppgtt *ppgtt =
|
||||
container_of(vm, struct i915_hw_ppgtt, base);
|
||||
|
@ -245,7 +251,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
|
|||
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
|
||||
unsigned last_pte, i;
|
||||
|
||||
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
|
||||
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
|
||||
|
||||
while (num_entries) {
|
||||
last_pte = first_pte + num_entries;
|
||||
|
@ -282,7 +288,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
|
|||
dma_addr_t page_addr;
|
||||
|
||||
page_addr = sg_page_iter_dma_address(&sg_iter);
|
||||
pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level);
|
||||
pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level, true);
|
||||
if (++act_pte == I915_PPGTT_PT_ENTRIES) {
|
||||
kunmap_atomic(pt_vaddr);
|
||||
act_pt++;
|
||||
|
@ -367,7 +373,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
|
|||
}
|
||||
|
||||
ppgtt->base.clear_range(&ppgtt->base, 0,
|
||||
ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES);
|
||||
ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true);
|
||||
|
||||
ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
|
||||
|
||||
|
@ -444,7 +450,8 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
|
|||
{
|
||||
ppgtt->base.clear_range(&ppgtt->base,
|
||||
i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
|
||||
obj->base.size >> PAGE_SHIFT);
|
||||
obj->base.size >> PAGE_SHIFT,
|
||||
true);
|
||||
}
|
||||
|
||||
extern int intel_iommu_gfx_mapped;
|
||||
|
@ -485,15 +492,65 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
|
|||
dev_priv->mm.interruptible = interruptible;
|
||||
}
|
||||
|
||||
void i915_check_and_clear_faults(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_ring_buffer *ring;
|
||||
int i;
|
||||
|
||||
if (INTEL_INFO(dev)->gen < 6)
|
||||
return;
|
||||
|
||||
for_each_ring(ring, dev_priv, i) {
|
||||
u32 fault_reg;
|
||||
fault_reg = I915_READ(RING_FAULT_REG(ring));
|
||||
if (fault_reg & RING_FAULT_VALID) {
|
||||
DRM_DEBUG_DRIVER("Unexpected fault\n"
|
||||
"\tAddr: 0x%08lx\\n"
|
||||
"\tAddress space: %s\n"
|
||||
"\tSource ID: %d\n"
|
||||
"\tType: %d\n",
|
||||
fault_reg & PAGE_MASK,
|
||||
fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
|
||||
RING_FAULT_SRCID(fault_reg),
|
||||
RING_FAULT_FAULT_TYPE(fault_reg));
|
||||
I915_WRITE(RING_FAULT_REG(ring),
|
||||
fault_reg & ~RING_FAULT_VALID);
|
||||
}
|
||||
}
|
||||
POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
|
||||
}
|
||||
|
||||
void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
/* Don't bother messing with faults pre GEN6 as we have little
|
||||
* documentation supporting that it's a good idea.
|
||||
*/
|
||||
if (INTEL_INFO(dev)->gen < 6)
|
||||
return;
|
||||
|
||||
i915_check_and_clear_faults(dev);
|
||||
|
||||
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
|
||||
dev_priv->gtt.base.start / PAGE_SIZE,
|
||||
dev_priv->gtt.base.total / PAGE_SIZE,
|
||||
false);
|
||||
}
|
||||
|
||||
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_i915_gem_object *obj;
|
||||
|
||||
i915_check_and_clear_faults(dev);
|
||||
|
||||
/* First fill our portion of the GTT with scratch pages */
|
||||
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
|
||||
dev_priv->gtt.base.start / PAGE_SIZE,
|
||||
dev_priv->gtt.base.total / PAGE_SIZE);
|
||||
dev_priv->gtt.base.total / PAGE_SIZE,
|
||||
true);
|
||||
|
||||
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
|
||||
i915_gem_clflush_object(obj, obj->pin_display);
|
||||
|
@ -536,7 +593,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
|
|||
|
||||
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
|
||||
addr = sg_page_iter_dma_address(&sg_iter);
|
||||
iowrite32(vm->pte_encode(addr, level), >t_entries[i]);
|
||||
iowrite32(vm->pte_encode(addr, level, true), >t_entries[i]);
|
||||
i++;
|
||||
}
|
||||
|
||||
|
@ -548,7 +605,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
|
|||
*/
|
||||
if (i != 0)
|
||||
WARN_ON(readl(>t_entries[i-1]) !=
|
||||
vm->pte_encode(addr, level));
|
||||
vm->pte_encode(addr, level, true));
|
||||
|
||||
/* This next bit makes the above posting read even more important. We
|
||||
* want to flush the TLBs only after we're certain all the PTE updates
|
||||
|
@ -560,7 +617,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
|
|||
|
||||
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
|
||||
unsigned int first_entry,
|
||||
unsigned int num_entries)
|
||||
unsigned int num_entries,
|
||||
bool use_scratch)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = vm->dev->dev_private;
|
||||
gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
|
||||
|
@ -573,7 +631,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
|
|||
first_entry, num_entries, max_entries))
|
||||
num_entries = max_entries;
|
||||
|
||||
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
|
||||
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch);
|
||||
|
||||
for (i = 0; i < num_entries; i++)
|
||||
iowrite32(scratch_pte, >t_base[i]);
|
||||
readl(gtt_base);
|
||||
|
@ -594,7 +653,8 @@ static void i915_ggtt_insert_entries(struct i915_address_space *vm,
|
|||
|
||||
static void i915_ggtt_clear_range(struct i915_address_space *vm,
|
||||
unsigned int first_entry,
|
||||
unsigned int num_entries)
|
||||
unsigned int num_entries,
|
||||
bool unused)
|
||||
{
|
||||
intel_gtt_clear_range(first_entry, num_entries);
|
||||
}
|
||||
|
@ -622,7 +682,8 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
|
|||
|
||||
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
|
||||
entry,
|
||||
obj->base.size >> PAGE_SHIFT);
|
||||
obj->base.size >> PAGE_SHIFT,
|
||||
true);
|
||||
|
||||
obj->has_global_gtt_mapping = 0;
|
||||
}
|
||||
|
@ -709,11 +770,11 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
|
|||
const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
|
||||
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
|
||||
hole_start, hole_end);
|
||||
ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count);
|
||||
ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count, true);
|
||||
}
|
||||
|
||||
/* And finally clear the reserved guard page */
|
||||
ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1);
|
||||
ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true);
|
||||
}
|
||||
|
||||
static bool
|
||||
|
|
|
@@ -604,6 +604,10 @@
#define   ARB_MODE_SWIZZLE_IVB	(1<<5)
#define RENDER_HWS_PGA_GEN7	(0x04080)
#define RING_FAULT_REG(ring)	(0x4094 + 0x100*(ring)->id)
#define   RING_FAULT_GTTSEL_MASK	(1<<11)
#define   RING_FAULT_SRCID(x)	((x >> 3) & 0xff)
#define   RING_FAULT_FAULT_TYPE(x)	((x >> 1) & 0x3)
#define   RING_FAULT_VALID	(1<<0)
#define DONE_REG	0x40b0
#define BSD_HWS_PGA_GEN7	(0x04180)
#define BLT_HWS_PGA_GEN7	(0x04280)

@@ -3881,6 +3885,9 @@
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG	0x9030
#define  GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB	(1<<11)

#define HSW_SCRATCH1	0xb038
#define  HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE	(1<<27)

#define HSW_FUSE_STRAP	0x42014
#define  HSW_CDCLK_LIMIT	(1 << 24)

@@ -4276,7 +4283,9 @@
#define FDI_RX_CHICKEN(pipe)	_PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)

#define SOUTH_DSPCLK_GATE_D	0xc2020
#define  PCH_DPLUNIT_CLOCK_GATE_DISABLE	(1<<30)
#define  PCH_DPLSUNIT_CLOCK_GATE_DISABLE	(1<<29)
#define  PCH_CPUNIT_CLOCK_GATE_DISABLE	(1<<14)
#define  PCH_LP_PARTITION_LEVEL_DISABLE	(1<<12)

/* CPU: FDI_TX */

@@ -4728,6 +4737,9 @@
#define GEN7_ROW_CHICKEN2_GT2	0xf4f4
#define   DOP_CLOCK_GATING_DISABLE	(1<<0)

#define HSW_ROW_CHICKEN3	0xe49c
#define  HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE	(1 << 6)

#define G4X_AUD_VID_DID	(dev_priv->info->display_mmio_offset + 0x62020)
#define INTEL_AUDIO_DEVCL	0x808629FB
#define INTEL_AUDIO_DEVBLC	0x80862801
@@ -83,8 +83,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
    return true;
}

static void intel_crt_get_config(struct intel_encoder *encoder,
                 struct intel_crtc_config *pipe_config)
static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
{
    struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
    struct intel_crt *crt = intel_encoder_to_crt(encoder);

@@ -102,7 +101,27 @@ static void intel_crt_get_config(struct intel_encoder *encoder,
    else
        flags |= DRM_MODE_FLAG_NVSYNC;

    pipe_config->adjusted_mode.flags |= flags;
    return flags;
}

static void intel_crt_get_config(struct intel_encoder *encoder,
                 struct intel_crtc_config *pipe_config)
{
    struct drm_device *dev = encoder->base.dev;

    pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
}

static void hsw_crt_get_config(struct intel_encoder *encoder,
                   struct intel_crtc_config *pipe_config)
{
    intel_ddi_get_config(encoder, pipe_config);

    pipe_config->adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
                          DRM_MODE_FLAG_NHSYNC |
                          DRM_MODE_FLAG_PVSYNC |
                          DRM_MODE_FLAG_NVSYNC);
    pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
}

/* Note: The caller is required to filter out dpms modes not supported by the

@@ -799,7 +818,10 @@ void intel_crt_init(struct drm_device *dev)
    crt->base.mode_set = intel_crt_mode_set;
    crt->base.disable = intel_disable_crt;
    crt->base.enable = intel_enable_crt;
    crt->base.get_config = intel_crt_get_config;
    if (IS_HASWELL(dev))
        crt->base.get_config = hsw_crt_get_config;
    else
        crt->base.get_config = intel_crt_get_config;
    if (I915_HAS_HOTPLUG(dev))
        crt->base.hpd_pin = HPD_CRT;
    if (HAS_DDI(dev))
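The CRT hunks factor the sync-flag readout into intel_crt_get_flags() and then let intel_crt_init() pick between intel_crt_get_config and hsw_crt_get_config per platform. The sketch below shows only that dispatch pattern; the types and flag values are hypothetical stand-ins, not the driver's real structures.

/*
 * Illustrative only: installing a platform-specific get_config callback at
 * init time, as the hunk above does with intel_crt_get_config vs.
 * hsw_crt_get_config.  All names and values here are invented.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_config { unsigned int flags; };

struct toy_encoder {
    void (*get_config)(struct toy_encoder *enc, struct toy_config *cfg);
    bool is_haswell;        /* stand-in for IS_HASWELL(dev) */
};

static unsigned int toy_get_flags(struct toy_encoder *enc)
{
    (void)enc;
    return 0x1;             /* pretend the hardware reports +hsync */
}

static void toy_get_config(struct toy_encoder *enc, struct toy_config *cfg)
{
    cfg->flags |= toy_get_flags(enc);
}

static void toy_hsw_get_config(struct toy_encoder *enc, struct toy_config *cfg)
{
    cfg->flags &= ~0xfu;    /* drop sync flags filled in by the shared path */
    cfg->flags |= toy_get_flags(enc);
}

int main(void)
{
    struct toy_encoder enc = { .is_haswell = true };
    struct toy_config cfg = { .flags = 0xe };

    enc.get_config = enc.is_haswell ? toy_hsw_get_config : toy_get_config;
    enc.get_config(&enc, &cfg);
    printf("flags = %#x\n", cfg.flags);
    return 0;
}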
@@ -1249,8 +1249,8 @@ static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
        intel_dp_check_link_status(intel_dp);
}

static void intel_ddi_get_config(struct intel_encoder *encoder,
                 struct intel_crtc_config *pipe_config)
void intel_ddi_get_config(struct intel_encoder *encoder,
              struct intel_crtc_config *pipe_config)
{
    struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);

@@ -1268,6 +1268,23 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
        flags |= DRM_MODE_FLAG_NVSYNC;

    pipe_config->adjusted_mode.flags |= flags;

    switch (temp & TRANS_DDI_BPC_MASK) {
    case TRANS_DDI_BPC_6:
        pipe_config->pipe_bpp = 18;
        break;
    case TRANS_DDI_BPC_8:
        pipe_config->pipe_bpp = 24;
        break;
    case TRANS_DDI_BPC_10:
        pipe_config->pipe_bpp = 30;
        break;
    case TRANS_DDI_BPC_12:
        pipe_config->pipe_bpp = 36;
        break;
    default:
        break;
    }
}

static void intel_ddi_destroy(struct drm_encoder *encoder)
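The switch added to intel_ddi_get_config() decodes the transcoder's BPC field into bits per pixel: 6, 8, 10 and 12 bits per component become 18, 24, 30 and 36 bpp (three colour components per pixel). A small sketch of just that mapping follows, with invented enum values standing in for the TRANS_DDI_BPC_* register bits.

/*
 * Made-up encodings; only the 6/8/10/12 bpc -> 18/24/30/36 bpp mapping
 * mirrors the hunk above (bpp = 3 components * bits per component).
 */
#include <stdio.h>

enum toy_bpc { TOY_BPC_6, TOY_BPC_8, TOY_BPC_10, TOY_BPC_12 };

static int toy_bpc_to_pipe_bpp(enum toy_bpc bpc)
{
    switch (bpc) {
    case TOY_BPC_6:  return 18;
    case TOY_BPC_8:  return 24;
    case TOY_BPC_10: return 30;
    case TOY_BPC_12: return 36;
    }
    return 0;   /* unknown field value: the driver leaves pipe_bpp untouched */
}

int main(void)
{
    for (int b = TOY_BPC_6; b <= TOY_BPC_12; b++)
        printf("bpc field %d -> pipe_bpp %d\n", b, toy_bpc_to_pipe_bpp(b));
    return 0;
}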
@@ -2327,9 +2327,10 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
               FDI_FE_ERRC_ENABLE);
}

static bool pipe_has_enabled_pch(struct intel_crtc *intel_crtc)
static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
{
    return intel_crtc->base.enabled && intel_crtc->config.has_pch_encoder;
    return crtc->base.enabled && crtc->active &&
        crtc->config.has_pch_encoder;
}

static void ivb_modeset_global_resources(struct drm_device *dev)

@@ -2979,6 +2980,48 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
           I915_READ(VSYNCSHIFT(cpu_transcoder)));
}

static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    uint32_t temp;

    temp = I915_READ(SOUTH_CHICKEN1);
    if (temp & FDI_BC_BIFURCATION_SELECT)
        return;

    WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
    WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

    temp |= FDI_BC_BIFURCATION_SELECT;
    DRM_DEBUG_KMS("enabling fdi C rx\n");
    I915_WRITE(SOUTH_CHICKEN1, temp);
    POSTING_READ(SOUTH_CHICKEN1);
}

static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
{
    struct drm_device *dev = intel_crtc->base.dev;
    struct drm_i915_private *dev_priv = dev->dev_private;

    switch (intel_crtc->pipe) {
    case PIPE_A:
        break;
    case PIPE_B:
        if (intel_crtc->config.fdi_lanes > 2)
            WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
        else
            cpt_enable_fdi_bc_bifurcation(dev);

        break;
    case PIPE_C:
        cpt_enable_fdi_bc_bifurcation(dev);

        break;
    default:
        BUG();
    }
}

/*
 * Enable PCH resources required for PCH ports:
 * - PCH PLLs

@@ -2997,6 +3040,9 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)

    assert_pch_transcoder_disabled(dev_priv, pipe);

    if (IS_IVYBRIDGE(dev))
        ivybridge_update_fdi_bc_bifurcation(intel_crtc);

    /* Write the TU size bits before fdi link training, so that error
     * detection works. */
    I915_WRITE(FDI_RX_TUSIZE1(pipe),

@@ -3941,8 +3987,6 @@ static void intel_connector_check_state(struct intel_connector *connector)
 * consider. */
void intel_connector_dpms(struct drm_connector *connector, int mode)
{
    struct intel_encoder *encoder = intel_attached_encoder(connector);

    /* All the simple cases only support two dpms states. */
    if (mode != DRM_MODE_DPMS_ON)
        mode = DRM_MODE_DPMS_OFF;

@@ -3953,10 +3997,8 @@ void intel_connector_dpms(struct drm_connector *connector, int mode)
    connector->dpms = mode;

    /* Only need to change hw state when actually enabled */
    if (encoder->base.crtc)
        intel_encoder_dpms(encoder, mode);
    else
        WARN_ON(encoder->connectors_active != false);
    if (connector->encoder)
        intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);

    intel_modeset_check_state(connector->dev);
}

@@ -4987,6 +5029,22 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
    if (!(tmp & PIPECONF_ENABLE))
        return false;

    if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
        switch (tmp & PIPECONF_BPC_MASK) {
        case PIPECONF_6BPC:
            pipe_config->pipe_bpp = 18;
            break;
        case PIPECONF_8BPC:
            pipe_config->pipe_bpp = 24;
            break;
        case PIPECONF_10BPC:
            pipe_config->pipe_bpp = 30;
            break;
        default:
            break;
        }
    }

    intel_get_pipe_timings(crtc, pipe_config);

    i9xx_get_pfit_config(crtc, pipe_config);

@@ -5580,48 +5638,6 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
    return true;
}

static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    uint32_t temp;

    temp = I915_READ(SOUTH_CHICKEN1);
    if (temp & FDI_BC_BIFURCATION_SELECT)
        return;

    WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
    WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

    temp |= FDI_BC_BIFURCATION_SELECT;
    DRM_DEBUG_KMS("enabling fdi C rx\n");
    I915_WRITE(SOUTH_CHICKEN1, temp);
    POSTING_READ(SOUTH_CHICKEN1);
}

static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
{
    struct drm_device *dev = intel_crtc->base.dev;
    struct drm_i915_private *dev_priv = dev->dev_private;

    switch (intel_crtc->pipe) {
    case PIPE_A:
        break;
    case PIPE_B:
        if (intel_crtc->config.fdi_lanes > 2)
            WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
        else
            cpt_enable_fdi_bc_bifurcation(dev);

        break;
    case PIPE_C:
        cpt_enable_fdi_bc_bifurcation(dev);

        break;
    default:
        BUG();
    }
}

int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
    /*

@@ -5815,9 +5831,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
                         &intel_crtc->config.fdi_m_n);
    }

    if (IS_IVYBRIDGE(dev))
        ivybridge_update_fdi_bc_bifurcation(intel_crtc);

    ironlake_set_pipeconf(crtc);

    /* Set up the display plane register */

@@ -5885,6 +5898,23 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
    if (!(tmp & PIPECONF_ENABLE))
        return false;

    switch (tmp & PIPECONF_BPC_MASK) {
    case PIPECONF_6BPC:
        pipe_config->pipe_bpp = 18;
        break;
    case PIPECONF_8BPC:
        pipe_config->pipe_bpp = 24;
        break;
    case PIPECONF_10BPC:
        pipe_config->pipe_bpp = 30;
        break;
    case PIPECONF_12BPC:
        pipe_config->pipe_bpp = 36;
        break;
    default:
        break;
    }

    if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
        struct intel_shared_dpll *pll;

@@ -8616,6 +8646,9 @@ intel_pipe_config_compare(struct drm_device *dev,
    PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
    PIPE_CONF_CHECK_X(dpll_hw_state.fp1);

    if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
        PIPE_CONF_CHECK_I(pipe_bpp);

#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_FLAGS

@@ -10049,33 +10082,6 @@ static void i915_disable_vga(struct drm_device *dev)
    POSTING_READ(vga_reg);
}

static void i915_enable_vga_mem(struct drm_device *dev)
{
    /* Enable VGA memory on Intel HD */
    if (HAS_PCH_SPLIT(dev)) {
        vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
        outb(inb(VGA_MSR_READ) | VGA_MSR_MEM_EN, VGA_MSR_WRITE);
        vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
                        VGA_RSRC_LEGACY_MEM |
                        VGA_RSRC_NORMAL_IO |
                        VGA_RSRC_NORMAL_MEM);
        vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
    }
}

void i915_disable_vga_mem(struct drm_device *dev)
{
    /* Disable VGA memory on Intel HD */
    if (HAS_PCH_SPLIT(dev)) {
        vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
        outb(inb(VGA_MSR_READ) & ~VGA_MSR_MEM_EN, VGA_MSR_WRITE);
        vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
                        VGA_RSRC_NORMAL_IO |
                        VGA_RSRC_NORMAL_MEM);
        vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
    }
}

void intel_modeset_init_hw(struct drm_device *dev)
{
    intel_init_power_well(dev);

@@ -10354,7 +10360,6 @@ void i915_redisable_vga(struct drm_device *dev)
    if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
        DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
        i915_disable_vga(dev);
        i915_disable_vga_mem(dev);
    }
}

@@ -10568,8 +10573,6 @@ void intel_modeset_cleanup(struct drm_device *dev)

    intel_disable_fbc(dev);

    i915_enable_vga_mem(dev);

    intel_disable_gt_powersave(dev);

    ironlake_teardown_rc6(dev);
@@ -1401,6 +1401,26 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
        else
            pipe_config->port_clock = 270000;
    }

    if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
        pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
        /*
         * This is a big fat ugly hack.
         *
         * Some machines in UEFI boot mode provide us a VBT that has 18
         * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
         * unknown we fail to light up. Yet the same BIOS boots up with
         * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
         * max, not what it tells us to use.
         *
         * Note: This will still be broken if the eDP panel is not lit
         * up by the BIOS, and thus we can't get the mode at module
         * load.
         */
        DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
                  pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
        dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
    }
}

static bool is_edp_psr(struct intel_dp *intel_dp)

@@ -1467,7 +1487,7 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)

    /* Avoid continuous PSR exit by masking memup and hpd */
    I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
           EDP_PSR_DEBUG_MASK_HPD);
           EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);

    intel_dp->psr_setup_done = true;
}
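The eDP hunk resolves a mismatch between the VBT's claimed maximum bpp and what the BIOS actually programmed: if the pipe came up deeper than the VBT maximum, the VBT value is raised to match rather than trusted. Below is a hedged sketch of just that clamp, with hypothetical field names in place of the driver's structures.

/*
 * Sketch of the override in the eDP hunk: trust the bpp the BIOS actually
 * programmed over what the VBT claims as the maximum.  Field names are
 * illustrative only.
 */
#include <stdio.h>

struct toy_vbt  { int edp_bpp; };
struct toy_pipe { int pipe_bpp; };

static void toy_fixup_edp_bpp(struct toy_vbt *vbt, const struct toy_pipe *pipe)
{
    if (vbt->edp_bpp && pipe->pipe_bpp > vbt->edp_bpp) {
        printf("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
               pipe->pipe_bpp, vbt->edp_bpp);
        vbt->edp_bpp = pipe->pipe_bpp;
    }
}

int main(void)
{
    struct toy_vbt vbt = { .edp_bpp = 18 };
    struct toy_pipe pipe = { .pipe_bpp = 24 };

    toy_fixup_edp_bpp(&vbt, &pipe);   /* vbt.edp_bpp becomes 24 */
    return 0;
}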
Some files were not shown because too many files have changed in this diff.