Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

All three conflicts were cases of simple overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>

This commit is contained in: commit 6abdd5f593

@@ -94,14 +94,11 @@ has a requirements for a minimum number of vectors the driver can pass a
 min_vecs argument set to this limit, and the PCI core will return -ENOSPC
 if it can't meet the minimum number of vectors.
 
-The flags argument should normally be set to 0, but can be used to pass the
-PCI_IRQ_NOMSI and PCI_IRQ_NOMSIX flag in case a device claims to support
-MSI or MSI-X, but the support is broken, or to pass PCI_IRQ_NOLEGACY in
-case the device does not support legacy interrupt lines.
-
-By default this function will spread the interrupts around the available
-CPUs, but this feature can be disabled by passing the PCI_IRQ_NOAFFINITY
-flag.
+The flags argument is used to specify which type of interrupt can be used
+by the device and the driver (PCI_IRQ_LEGACY, PCI_IRQ_MSI, PCI_IRQ_MSIX).
+A convenient short-hand (PCI_IRQ_ALL_TYPES) is also available to ask for
+any possible kind of interrupt. If the PCI_IRQ_AFFINITY flag is set,
+pci_alloc_irq_vectors() will spread the interrupts around the available CPUs.
 
 To get the Linux IRQ numbers passed to request_irq() and free_irq() and the
 vectors, use the following function:

@@ -131,7 +128,7 @@ larger than the number supported by the device it will automatically be
 capped to the supported limit, so there is no need to query the number of
 vectors supported beforehand:
 
-    nvec = pci_alloc_irq_vectors(pdev, 1, nvec, 0);
+    nvec = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_ALL_TYPES);
     if (nvec < 0)
         goto out_err;
 
@@ -140,7 +137,7 @@ interrupts it can request a particular number of interrupts by passing that
 number to pci_alloc_irq_vectors() function as both 'min_vecs' and
 'max_vecs' parameters:
 
-    ret = pci_alloc_irq_vectors(pdev, nvec, nvec, 0);
+    ret = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_ALL_TYPES);
     if (ret < 0)
         goto out_err;
 
@@ -148,15 +145,14 @@ The most notorious example of the request type described above is enabling
 the single MSI mode for a device. It could be done by passing two 1s as
 'min_vecs' and 'max_vecs':
 
-    ret = pci_alloc_irq_vectors(pdev, 1, 1, 0);
+    ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
     if (ret < 0)
         goto out_err;
 
 Some devices might not support using legacy line interrupts, in which case
-the PCI_IRQ_NOLEGACY flag can be used to fail the request if the platform
-can't provide MSI or MSI-X interrupts:
+the driver can specify that only MSI or MSI-X is acceptable:
 
-    nvec = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_NOLEGACY);
+    nvec = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_MSI | PCI_IRQ_MSIX);
     if (nvec < 0)
         goto out_err;
 
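A minimal sketch of the flow the updated document describes; the foo_* names
are hypothetical placeholders, not part of the patch, and error unwinding is
omitted for brevity:

    #include <linux/interrupt.h>
    #include <linux/pci.h>

    static irqreturn_t foo_handler(int irq, void *data)
    {
            return IRQ_HANDLED;
    }

    static int foo_setup_irqs(struct pci_dev *pdev)
    {
            int nvec, i, ret;

            /* accept anything from 1 to 8 vectors, of any interrupt type */
            nvec = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_ALL_TYPES);
            if (nvec < 0)
                    return nvec;

            for (i = 0; i < nvec; i++) {
                    /* pci_irq_vector() maps a vector index to a Linux IRQ */
                    ret = request_irq(pci_irq_vector(pdev, i), foo_handler,
                                      0, "foo", pdev);
                    if (ret)
                            return ret;
            }
            return 0;
    }
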
@@ -53,6 +53,7 @@ stable kernels.
 | ARM            | Cortex-A57      | #832075         | ARM64_ERRATUM_832075    |
 | ARM            | Cortex-A57      | #852523         | N/A                     |
 | ARM            | Cortex-A57      | #834220         | ARM64_ERRATUM_834220    |
+| ARM            | Cortex-A72      | #853709         | N/A                     |
 | ARM            | MMU-500         | #841119,#826419 | N/A                     |
 |                |                 |                 |                         |
 | Cavium         | ThunderX ITS    | #22375, #24313  | CAVIUM_ERRATUM_22375    |

@@ -131,7 +131,7 @@ pygments_style = 'sphinx'
 todo_include_todos = False
 
 primary_domain = 'C'
-highlight_language = 'C'
+highlight_language = 'guess'
 
 # -- Options for HTML output ----------------------------------------------
 

@@ -8,8 +8,6 @@ Required properties:
 - interrupts: Interrupt number for McPDM
 - interrupt-parent: The parent interrupt controller
 - ti,hwmods: Name of the hwmod associated to the McPDM
-- clocks: phandle for the pdmclk provider, likely <&twl6040>
-- clock-names: Must be "pdmclk"
 
 Example:
 
@@ -21,11 +19,3 @@ mcpdm: mcpdm@40132000 {
     interrupt-parent = <&gic>;
     ti,hwmods = "mcpdm";
 };
-
-In board DTS file the pdmclk needs to be added:
-
-&mcpdm {
-    clocks = <&twl6040>;
-    clock-names = "pdmclk";
-    status = "okay";
-};

@@ -62,7 +62,7 @@ For more examples of cooling devices, refer to the example sections below.
 Required properties:
 - #cooling-cells:   Used to provide cooling device specific information
   Type: unsigned    while referring to it. Must be at least 2, in order
-  Size: one cell    to specify minimum and maximum cooling state used
+  Size: one cell    to specify minimum and maximum cooling state used
                     in the reference. The first cell is the minimum
                     cooling state requested and the second cell is
                     the maximum cooling state requested in the reference.

@@ -119,7 +119,7 @@ Required properties:
 Optional property:
 - contribution:     The cooling contribution to the thermal zone of the
   Type: unsigned    referred cooling device at the referred trip point.
-  Size: one cell    The contribution is a ratio of the sum
+  Size: one cell    The contribution is a ratio of the sum
                     of all cooling contributions within a thermal zone.
 
 Note: Using the THERMAL_NO_LIMIT (-1UL) constant in the cooling-device phandle

@@ -145,7 +145,7 @@ Required properties:
   Size: one cell
 
 - thermal-sensors:  A list of thermal sensor phandles and sensor specifier
-  Type: list of     used while monitoring the thermal zone.
+  Type: list of     used while monitoring the thermal zone.
   phandles + sensor
   specifier
 

@@ -473,7 +473,7 @@ thermal-zones {
             <&adc>;    /* pcb north */
 
         /* hotspot = 100 * bandgap - 120 * adc + 484 */
-        coefficients = <100 -120 484>;
+        coefficients = <100 -120 484>;
 
         trips {
             ...

@@ -502,7 +502,7 @@ from the ADC sensor. The binding would be then:
     thermal-sensors = <&adc>;
 
     /* hotspot = 1 * adc + 6000 */
-    coefficients = <1 6000>;
+    coefficients = <1 6000>;
 
 (d) - Board thermal
 

@@ -19,5 +19,5 @@ enhancements. It can monitor up to 4 voltages, 16 temperatures and
 implemented in this driver.
 
 Specification of the chip can be found here:
-ftp:///pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/BMC-Teutates_Specification_V1.21.pdf
-ftp:///pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/Fujitsu_mainboards-1-Sensors_HowTo-en-US.pdf
+ftp://ftp.ts.fujitsu.com/pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/BMC-Teutates_Specification_V1.21.pdf
+ftp://ftp.ts.fujitsu.com/pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/Fujitsu_mainboards-1-Sensors_HowTo-en-US.pdf

@@ -366,8 +366,6 @@ Domain`_ references.
 Cross-referencing from reStructuredText
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. highlight:: none
-
 To cross-reference the functions and types defined in the kernel-doc comments
 from reStructuredText documents, please use the `Sphinx C Domain`_
 references. For example::

@@ -390,8 +388,6 @@ For further details, please refer to the `Sphinx C Domain`_ documentation.
 Function documentation
 ----------------------
 
-.. highlight:: c
-
 The general format of a function and function-like macro kernel-doc comment is::
 
 /**

@@ -572,8 +568,6 @@ DocBook XML [DEPRECATED]
 Converting DocBook to Sphinx
 ----------------------------
 
-.. highlight:: none
-
 Over time, we expect all of the documents under ``Documentation/DocBook`` to be
 converted to Sphinx and reStructuredText. For most DocBook XML documents, a good
 enough solution is to use the simple ``Documentation/sphinx/tmplcvt`` script,

@@ -3032,6 +3032,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
             PAGE_SIZE is used as alignment.
             PCI-PCI bridge can be specified, if resource
             windows need to be expanded.
+            To specify the alignment for several
+            instances of a device, the PCI vendor,
+            device, subvendor, and subdevice may be
+            specified, e.g., 4096@pci:8086:9c22:103c:198f
     ecrc=   Enable/disable PCIe ECRC (transaction layer
             end-to-end CRC checking).
             bios: Use BIOS/firmware settings. This is the

@@ -587,26 +587,6 @@ of DSA, would be the its port-based VLAN, used by the associated bridge device.
 TODO
 ====
 
-The platform device problem
----------------------------
-DSA is currently implemented as a platform device driver which is far from ideal
-as was discussed in this thread:
-
-http://permalink.gmane.org/gmane.linux.network/329848
-
-This basically prevents the device driver model to be properly used and applied,
-and support non-MDIO, non-MMIO Ethernet connected switches.
-
-Another problem with the platform device driver approach is that it prevents the
-use of a modular switch drivers build due to a circular dependency, illustrated
-here:
-
-http://comments.gmane.org/gmane.linux.network/345803
-
-Attempts of reworking this has been done here:
-
-https://lwn.net/Articles/643149/
-
 Making SWITCHDEV and DSA converge towards an unified codebase
 -------------------------------------------------------------
 

@@ -164,7 +164,32 @@ load n/2 modules more and try again.
 Again, if you find the offending module(s), it(they) must be unloaded every time
 before hibernation, and please report the problem with it(them).
 
-c) Advanced debugging
+c) Using the "test_resume" hibernation option
+
+/sys/power/disk generally tells the kernel what to do after creating a
+hibernation image. One of the available options is "test_resume" which
+causes the just created image to be used for immediate restoration. Namely,
+after doing:
+
+# echo test_resume > /sys/power/disk
+# echo disk > /sys/power/state
+
+a hibernation image will be created and a resume from it will be triggered
+immediately without involving the platform firmware in any way.
+
+That test can be used to check if failures to resume from hibernation are
+related to bad interactions with the platform firmware. That is, if the above
+works every time, but resume from actual hibernation does not work or is
+unreliable, the platform firmware may be responsible for the failures.
+
+On architectures and platforms that support using different kernels to restore
+hibernation images (that is, the kernel used to read the image from storage and
+load it into memory is different from the one included in the image) or support
+kernel address space randomization, it also can be used to check if failures
+to resume may be related to the differences between the restore and image
+kernels.
+
+d) Advanced debugging
 
 In case that hibernation does not work on your system even in the minimal
 configuration and compiling more drivers as modules is not practical or some

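The "test_resume" sequence above can also be driven programmatically. A
minimal user-space sketch, assuming CONFIG_HIBERNATION is set and sysfs is
mounted at /sys:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static int write_sysfs(const char *path, const char *val)
    {
            int fd = open(path, O_WRONLY);

            if (fd < 0 || write(fd, val, strlen(val)) < 0) {
                    perror(path);
                    return -1;
            }
            return close(fd);
    }

    int main(void)
    {
            /* equivalent of: echo test_resume > /sys/power/disk */
            if (write_sysfs("/sys/power/disk", "test_resume"))
                    return 1;
            /* equivalent of: echo disk > /sys/power/state */
            return write_sysfs("/sys/power/state", "disk") ? 1 : 0;
    }
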
@@ -1,75 +1,76 @@
-Power Management Interface
+Power Management Interface for System Sleep
 
+Copyright (c) 2016 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 
-The power management subsystem provides a unified sysfs interface to
-userspace, regardless of what architecture or platform one is
-running. The interface exists in /sys/power/ directory (assuming sysfs
-is mounted at /sys).
+The power management subsystem provides userspace with a unified sysfs interface
+for system sleep regardless of the underlying system architecture or platform.
+The interface is located in the /sys/power/ directory (assuming that sysfs is
+mounted at /sys).
 
-/sys/power/state controls system power state. Reading from this file
-returns what states are supported, which is hard-coded to 'freeze',
-'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk'
-(Suspend-to-Disk).
+/sys/power/state is the system sleep state control file.
 
-Writing to this file one of those strings causes the system to
-transition into that state. Please see the file
-Documentation/power/states.txt for a description of each of those
-states.
+Reading from it returns a list of supported sleep states, encoded as:
+
+'freeze' (Suspend-to-Idle)
+'standby' (Power-On Suspend)
+'mem' (Suspend-to-RAM)
+'disk' (Suspend-to-Disk)
 
-/sys/power/disk controls the operating mode of the suspend-to-disk
-mechanism. Suspend-to-disk can be handled in several ways. We have a
-few options for putting the system to sleep - using the platform driver
-(e.g. ACPI or other suspend_ops), powering off the system or rebooting the
-system (for testing).
+Suspend-to-Idle is always supported. Suspend-to-Disk is always supported
+too as long the kernel has been configured to support hibernation at all
+(ie. CONFIG_HIBERNATION is set in the kernel configuration file). Support
+for Suspend-to-RAM and Power-On Suspend depends on the capabilities of the
+platform.
 
-Additionally, /sys/power/disk can be used to turn on one of the two testing
-modes of the suspend-to-disk mechanism: 'testproc' or 'test'. If the
-suspend-to-disk mechanism is in the 'testproc' mode, writing 'disk' to
-/sys/power/state will cause the kernel to disable nonboot CPUs and freeze
-tasks, wait for 5 seconds, unfreeze tasks and enable nonboot CPUs. If it is
-in the 'test' mode, writing 'disk' to /sys/power/state will cause the kernel
-to disable nonboot CPUs and freeze tasks, shrink memory, suspend devices, wait
-for 5 seconds, resume devices, unfreeze tasks and enable nonboot CPUs. Then,
-we are able to look in the log messages and work out, for example, which code
-is being slow and which device drivers are misbehaving.
+If one of the strings listed in /sys/power/state is written to it, the system
+will attempt to transition into the corresponding sleep state. Refer to
+Documentation/power/states.txt for a description of each of those states.
 
-Reading from this file will display all supported modes and the currently
-selected one in brackets, for example
+/sys/power/disk controls the operating mode of hibernation (Suspend-to-Disk).
+Specifically, it tells the kernel what to do after creating a hibernation image.
 
-    [shutdown] reboot test testproc
+Reading from it returns a list of supported options encoded as:
 
-Writing to this file will accept one of
+'platform' (put the system into sleep using a platform-provided method)
+'shutdown' (shut the system down)
+'reboot' (reboot the system)
+'suspend' (trigger a Suspend-to-RAM transition)
+'test_resume' (resume-after-hibernation test mode)
 
-    'platform' (only if the platform supports it)
-    'shutdown'
-    'reboot'
-    'testproc'
-    'test'
+The currently selected option is printed in square brackets.
 
-/sys/power/image_size controls the size of the image created by
-the suspend-to-disk mechanism. It can be written a string
-representing a non-negative integer that will be used as an upper
-limit of the image size, in bytes. The suspend-to-disk mechanism will
-do its best to ensure the image size will not exceed that number. However,
-if this turns out to be impossible, it will try to suspend anyway using the
-smallest image possible. In particular, if "0" is written to this file, the
-suspend image will be as small as possible.
+The 'platform' option is only available if the platform provides a special
+mechanism to put the system to sleep after creating a hibernation image (ACPI
+does that, for example). The 'suspend' option is available if Suspend-to-RAM
+is supported. Refer to Documentation/power/basic_pm_debugging.txt for the
+description of the 'test_resume' option.
 
-Reading from this file will display the current image size limit, which
-is set to 2/5 of available RAM by default.
+To select an option, write the string representing it to /sys/power/disk.
 
-/sys/power/pm_trace controls the code which saves the last PM event point in
-the RTC across reboots, so that you can debug a machine that just hangs
-during suspend (or more commonly, during resume). Namely, the RTC is only
-used to save the last PM event point if this file contains '1'. Initially it
-contains '0' which may be changed to '1' by writing a string representing a
-nonzero integer into it.
+/sys/power/image_size controls the size of hibernation images.
 
-To use this debugging feature you should attempt to suspend the machine, then
-reboot it and run
+It can be written a string representing a non-negative integer that will be
+used as a best-effort upper limit of the image size, in bytes. The hibernation
+core will do its best to ensure that the image size will not exceed that number.
+However, if that turns out to be impossible to achieve, a hibernation image will
+still be created and its size will be as small as possible. In particular,
+writing '0' to this file will enforce hibernation images to be as small as
+possible.
 
-    dmesg -s 1000000 | grep 'hash matches'
+Reading from this file returns the current image size limit, which is set to
+around 2/5 of available RAM by default.
 
-CAUTION: Using it will cause your machine's real-time (CMOS) clock to be
-set to a random invalid time after a resume.
+/sys/power/pm_trace controls the PM trace mechanism saving the last suspend
+or resume event point in the RTC across reboots.
+
+It helps to debug hard lockups or reboots due to device driver failures that
+occur during system suspend or resume (which is more common) more effectively.
+
+If /sys/power/pm_trace contains '1', the fingerprint of each suspend/resume
+event point in turn will be stored in the RTC memory (overwriting the actual
+RTC information), so it will survive a system crash if one occurs right after
+storing it and it can be used later to identify the driver that caused the crash
+to happen (see Documentation/power/s2ram.txt for more information).
+
+Initially it contains '0' which may be changed to '1' by writing a string
+representing a nonzero integer into it.

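For completeness, the control files described above are read with plain POSIX
I/O from user space; a small sketch that lists the advertised sleep states:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[64];
            ssize_t n;
            int fd = open("/sys/power/state", O_RDONLY);

            if (fd < 0)
                    return 1;
            n = read(fd, buf, sizeof(buf) - 1);
            close(fd);
            if (n <= 0)
                    return 1;
            buf[n] = '\0';
            printf("supported sleep states: %s", buf); /* e.g. "freeze mem disk" */
            return 0;
    }
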
@@ -167,6 +167,8 @@ signal will be rolled back anyway.
 For signals taken in non-TM or suspended mode, we use the
 normal/non-checkpointed stack pointer.
 
+Any transaction initiated inside a sighandler and suspended on return
+from the sighandler to the kernel will get reclaimed and discarded.
+
 Failure cause codes used by kernel
 ==================================
 

@@ -42,11 +42,12 @@
 caption a.headerlink { opacity: 0; }
 caption a.headerlink:hover { opacity: 1; }
 
-/* inline literal: drop the borderbox and red color */
+/* inline literal: drop the borderbox, padding and red color */
 
 code, .rst-content tt, .rst-content code {
     color: inherit;
     border: none;
+    padding: unset;
     background: inherit;
     font-size: 85%;
 }

MAINTAINERS | 17

@@ -890,6 +890,15 @@ S: Supported
 F: drivers/gpu/drm/arc/
 F: Documentation/devicetree/bindings/display/snps,arcpgu.txt
 
+ARM ARCHITECTED TIMER DRIVER
+M: Mark Rutland <mark.rutland@arm.com>
+M: Marc Zyngier <marc.zyngier@arm.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: arch/arm/include/asm/arch_timer.h
+F: arch/arm64/include/asm/arch_timer.h
+F: drivers/clocksource/arm_arch_timer.c
+
 ARM HDLCD DRM DRIVER
 M: Liviu Dudau <liviu.dudau@arm.com>
 S: Supported

@@ -4534,6 +4543,12 @@ L: linux-edac@vger.kernel.org
 S: Maintained
 F: drivers/edac/sb_edac.c
 
+EDAC-SKYLAKE
+M: Tony Luck <tony.luck@intel.com>
+L: linux-edac@vger.kernel.org
+S: Maintained
+F: drivers/edac/skx_edac.c
+
 EDAC-XGENE
 APPLIED MICRO (APM) X-GENE SOC EDAC
 M: Loc Ho <lho@apm.com>

@@ -7664,7 +7679,7 @@ L: linux-rdma@vger.kernel.org
 S: Supported
 W: https://github.com/SoftRoCE/rxe-dev/wiki/rxe-dev:-Home
 Q: http://patchwork.kernel.org/project/linux-rdma/list/
-F: drivers/infiniband/hw/rxe/
+F: drivers/infiniband/sw/rxe/
 F: include/uapi/rdma/rdma_user_rxe.h
 
 MEMBARRIER SUPPORT

Makefile | 2

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*

@@ -142,7 +142,7 @@
 
 #ifdef CONFIG_ARC_CURR_IN_REG
     ; Retrieve orig r25 and save it with rest of callee_regs
-    ld.as r12, [r12, PT_user_r25]
+    ld r12, [r12, PT_user_r25]
     PUSH r12
 #else
     PUSH r25

@@ -198,7 +198,7 @@
 
     ; SP is back to start of pt_regs
 #ifdef CONFIG_ARC_CURR_IN_REG
-    st.as r12, [sp, PT_user_r25]
+    st r12, [sp, PT_user_r25]
 #endif
 .endm
 

@@ -188,10 +188,10 @@ static inline int arch_irqs_disabled(void)
 .endm
 
 .macro IRQ_ENABLE scratch
+    TRACE_ASM_IRQ_ENABLE
     lr \scratch, [status32]
     or \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
     flag \scratch
-    TRACE_ASM_IRQ_ENABLE
 .endm
 
 #endif /* __ASSEMBLY__ */

@@ -280,7 +280,7 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
 
 #define pte_page(pte)     pfn_to_page(pte_pfn(pte))
 #define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
-#define pfn_pte(pfn, prot) (__pte(((pte_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 
 /* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/
 #define pte_pfn(pte)      (pte_val(pte) >> PAGE_SHIFT)

@@ -13,8 +13,15 @@
 
 /* Machine specific ELF Hdr flags */
 #define EF_ARC_OSABI_MSK     0x00000f00
 #define EF_ARC_OSABI_ORIG    0x00000000   /* MUST be zero for back-compat */
-#define EF_ARC_OSABI_CURRENT 0x00000300   /* v3 (no legacy syscalls) */
+
+#define EF_ARC_OSABI_V3      0x00000300   /* v3 (no legacy syscalls) */
+#define EF_ARC_OSABI_V4      0x00000400   /* v4 (64bit data any reg align) */
+
+#if __GNUC__ < 6
+#define EF_ARC_OSABI_CURRENT EF_ARC_OSABI_V3
+#else
+#define EF_ARC_OSABI_CURRENT EF_ARC_OSABI_V4
+#endif
 
 typedef unsigned long elf_greg_t;
 typedef unsigned long elf_fpregset_t;

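The EF_ARC_OSABI_* values are plain bits in the ELF header's e_flags, so the
ABI version of a binary can be inspected from user space. A hypothetical
sketch (the mask constant is copied from the hunk above):

    #include <elf.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    #define EF_ARC_OSABI_MSK 0x00000f00

    int main(int argc, char **argv)
    {
            Elf32_Ehdr hdr;
            int fd;

            if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
                    return 1;
            if (read(fd, &hdr, sizeof(hdr)) != sizeof(hdr))
                    return 1;
            printf("ARC OS ABI v%u\n",
                   (unsigned int)(hdr.e_flags & EF_ARC_OSABI_MSK) >> 8);
            return 0;
    }
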
@@ -28,6 +28,7 @@ extern void __muldf3(void);
 extern void __divdf3(void);
 extern void __floatunsidf(void);
 extern void __floatunsisf(void);
+extern void __udivdi3(void);
 
 EXPORT_SYMBOL(__ashldi3);
 EXPORT_SYMBOL(__ashrdi3);

@@ -45,6 +46,7 @@ EXPORT_SYMBOL(__muldf3);
 EXPORT_SYMBOL(__divdf3);
 EXPORT_SYMBOL(__floatunsidf);
 EXPORT_SYMBOL(__floatunsisf);
+EXPORT_SYMBOL(__udivdi3);
 
 /* ARC optimised assembler routines */
 EXPORT_SYMBOL(memset);

@@ -199,7 +199,7 @@ int elf_check_arch(const struct elf32_hdr *x)
     }
 
     eflags = x->e_flags;
-    if ((eflags & EF_ARC_OSABI_MSK) < EF_ARC_OSABI_CURRENT) {
+    if ((eflags & EF_ARC_OSABI_MSK) != EF_ARC_OSABI_CURRENT) {
         pr_err("ABI mismatch - you need newer toolchain\n");
         force_sigsegv(SIGSEGV, current);
         return 0;

@@ -291,8 +291,10 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
                cpu->dccm.base_addr, TO_KB(cpu->dccm.sz),
                cpu->iccm.base_addr, TO_KB(cpu->iccm.sz));
 
-    n += scnprintf(buf + n, len - n,
-                   "OS ABI [v3]\t: no-legacy-syscalls\n");
+    n += scnprintf(buf + n, len - n, "OS ABI [v%d]\t: %s\n",
+                   EF_ARC_OSABI_CURRENT >> 8,
+                   EF_ARC_OSABI_CURRENT == EF_ARC_OSABI_V3 ?
+                   "no-legacy-syscalls" : "64-bit data any register aligned");
 
     return buf;
 }

@@ -921,6 +921,15 @@ void arc_cache_init(void)
 
     printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
 
+    /*
+     * Only master CPU needs to execute rest of function:
+     * - Assume SMP so all cores will have same cache config so
+     *   any geomtry checks will be same for all
+     * - IOC setup / dma callbacks only need to be setup once
+     */
+    if (cpu)
+        return;
+
     if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
         struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
 

@@ -61,6 +61,7 @@ void *kmap(struct page *page)
 
     return kmap_high(page);
 }
+EXPORT_SYMBOL(kmap);
 
 void *kmap_atomic(struct page *page)
 {

@@ -295,6 +295,7 @@ __und_svc_fault:
     bl __und_fault
 
 __und_svc_finish:
+    get_thread_info tsk
     ldr r5, [sp, #S_PSR]    @ Get SVC cpsr
     svc_exit r5             @ return from exception
  UNWIND(.fnend )

@@ -1309,7 +1309,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
     smp_rmb();
 
     pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
-    if (is_error_pfn(pfn))
+    if (is_error_noslot_pfn(pfn))
         return -EFAULT;
 
     if (kvm_is_device_pfn(pfn)) {

@@ -271,6 +271,12 @@ static int __init imx_gpc_init(struct device_node *node,
     for (i = 0; i < IMR_NUM; i++)
         writel_relaxed(~0, gpc_base + GPC_IMR1 + i * 4);
 
+    /*
+     * Clear the OF_POPULATED flag set in of_irq_init so that
+     * later the GPC power domain driver will not be skipped.
+     */
+    of_node_clear_flag(node, OF_POPULATED);
+
     return 0;
 }
 IRQCHIP_DECLARE(imx_gpc, "fsl,imx6q-gpc", imx_gpc_init);

@@ -83,7 +83,8 @@ static struct resource smc91x_resources[] = {
 };
 
 static struct smc91x_platdata smc91x_platdata = {
-    .flags = SMC91X_USE_32BIT | SMC91X_USE_DMA | SMC91X_NOWAIT,
+    .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
+             SMC91X_USE_DMA | SMC91X_NOWAIT,
 };
 
 static struct platform_device smc91x_device = {

@@ -120,7 +120,8 @@ static struct resource smc91x_resources[] = {
 };
 
 static struct smc91x_platdata xcep_smc91x_info = {
-    .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT | SMC91X_USE_DMA,
+    .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
+             SMC91X_NOWAIT | SMC91X_USE_DMA,
 };
 
 static struct platform_device smc91x_device = {

@@ -93,7 +93,8 @@ static struct smsc911x_platform_config smsc911x_config = {
 };
 
 static struct smc91x_platdata smc91x_platdata = {
-    .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
+    .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
+             SMC91X_NOWAIT,
 };
 
 static struct platform_device realview_eth_device = {

@@ -45,7 +45,7 @@ static struct resource smc91x_resources[] = {
 };
 
 static struct smc91x_platdata smc91x_platdata = {
-    .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+    .flags = SMC91X_USE_16BIT | SMC91X_USE_8BIT | SMC91X_NOWAIT,
 };
 
 static struct platform_device smc91x_device = {

@@ -728,7 +728,8 @@ static void *__init late_alloc(unsigned long sz)
 {
     void *ptr = (void *)__get_free_pages(PGALLOC_GFP, get_order(sz));
 
-    BUG_ON(!ptr);
+    if (!ptr || !pgtable_page_ctor(virt_to_page(ptr)))
+        BUG();
     return ptr;
 }
 
@@ -1155,10 +1156,19 @@ void __init sanity_check_meminfo(void)
 {
     phys_addr_t memblock_limit = 0;
     int highmem = 0;
-    phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
+    u64 vmalloc_limit;
     struct memblock_region *reg;
     bool should_use_highmem = false;
 
+    /*
+     * Let's use our own (unoptimized) equivalent of __pa() that is
+     * not affected by wrap-arounds when sizeof(phys_addr_t) == 4.
+     * The result is used as the upper bound on physical memory address
+     * and may itself be outside the valid range for which phys_addr_t
+     * and therefore __pa() is defined.
+     */
+    vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
+
     for_each_memblock(memory, reg) {
         phys_addr_t block_start = reg->base;
         phys_addr_t block_end = reg->base + reg->size;

@@ -1183,10 +1193,11 @@ void __init sanity_check_meminfo(void)
         if (reg->size > size_limit) {
             phys_addr_t overlap_size = reg->size - size_limit;
 
-            pr_notice("Truncating RAM at %pa-%pa to -%pa",
-                      &block_start, &block_end, &vmalloc_limit);
-            memblock_remove(vmalloc_limit, overlap_size);
+            pr_notice("Truncating RAM at %pa-%pa",
+                      &block_start, &block_end);
+            block_end = vmalloc_limit;
+            pr_cont(" to -%pa", &block_end);
+            memblock_remove(vmalloc_limit, overlap_size);
             should_use_highmem = true;
         }
     }

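A stand-alone illustration of the wrap-around the new comment describes:
computing the bound in a 32-bit type overflows, while the same arithmetic done
in a 64-bit type keeps the true value (all constants here are made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t vmalloc_min = 0xf0000000u;  /* hypothetical */
            uint32_t page_offset = 0x40000000u;  /* hypothetical PAGE_OFFSET */
            uint32_t phys_offset = 0x80000000u;  /* hypothetical PHYS_OFFSET */

            uint32_t limit32 = vmalloc_min - page_offset + phys_offset;
            uint64_t limit64 = (uint64_t)vmalloc_min - page_offset + phys_offset;

            /* prints 0x30000000 (wrapped) vs 0x130000000 (correct) */
            printf("%#x vs %#llx\n", limit32, (unsigned long long)limit64);
            return 0;
    }
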
@@ -50,7 +50,7 @@ DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
 static struct vcpu_info __percpu *xen_vcpu_info;
 
 /* Linux <-> Xen vCPU id mapping */
-DEFINE_PER_CPU(int, xen_vcpu_id) = -1;
+DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
 EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
 
 /* These are unused until we support booting "pre-ballooned" */

@@ -757,6 +757,9 @@ ENTRY(__enable_mmu)
     isb
     bl __create_page_tables     // recreate kernel mapping
 
+    tlbi vmalle1                // Remove any stale TLB entries
+    dsb nsh
+
     msr sctlr_el1, x19          // re-enable the MMU
     isb
     ic iallu                    // flush instructions fetched

@@ -101,12 +101,20 @@ ENTRY(cpu_resume)
     bl el2_setup                // if in EL2 drop to EL1 cleanly
     /* enable the MMU early - so we can access sleep_save_stash by va */
     adr_l lr, __enable_mmu      /* __cpu_setup will return here */
-    ldr x27, =_cpu_resume       /* __enable_mmu will branch here */
+    adr_l x27, _resume_switched /* __enable_mmu will branch here */
     adrp x25, idmap_pg_dir
     adrp x26, swapper_pg_dir
     b __cpu_setup
 ENDPROC(cpu_resume)
 
+    .pushsection ".idmap.text", "ax"
+_resume_switched:
+    ldr x8, =_cpu_resume
+    br x8
+ENDPROC(_resume_switched)
+    .ltorg
+    .popsection
+
 ENTRY(_cpu_resume)
     mrs x1, mpidr_el1
     adrp x8, mpidr_hash

@@ -256,7 +256,7 @@ static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
 
     /*
      * We must restore the 32-bit state before the sysregs, thanks
-     * to Cortex-A57 erratum #852523.
+     * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
      */
     __sysreg32_restore_state(vcpu);
     __sysreg_restore_guest_state(guest_ctxt);

@@ -823,14 +823,6 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
  * Architected system registers.
  * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
  *
- * We could trap ID_DFR0 and tell the guest we don't support performance
- * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
- * NAKed, so it will read the PMCR anyway.
- *
- * Therefore we tell the guest we have 0 counters. Unfortunately, we
- * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
- * all PM registers, which doesn't crash the guest kernel at least.
- *
  * Debug handling: We do trap most, if not all debug related system
  * registers. The implementation is good enough to ensure that a guest
  * can use these with minimal performance degradation. The drawback is

@@ -1360,7 +1352,7 @@ static const struct sys_reg_desc cp15_regs[] = {
     { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
 
     /* ICC_SRE */
-    { Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },
+    { Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
 
     { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
 

@@ -242,7 +242,7 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
 
 static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
 {
-    pte_t *pte = pte_offset_kernel(pmd, 0);
+    pte_t *pte = pte_offset_kernel(pmd, 0UL);
     unsigned long addr;
     unsigned i;
 
@@ -254,7 +254,7 @@ static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
 
 static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
 {
-    pmd_t *pmd = pmd_offset(pud, 0);
+    pmd_t *pmd = pmd_offset(pud, 0UL);
     unsigned long addr;
     unsigned i;
 
@@ -271,7 +271,7 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
 
 static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
 {
-    pud_t *pud = pud_offset(pgd, 0);
+    pud_t *pud = pud_offset(pgd, 0UL);
     unsigned long addr;
     unsigned i;
 

@@ -23,6 +23,8 @@
 #include <linux/module.h>
 #include <linux/of.h>
 
+#include <asm/acpi.h>
+
 struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
 nodemask_t numa_nodes_parsed __initdata;

@@ -146,7 +146,8 @@ static struct platform_device hitachi_fb_device = {
 #include <linux/smc91x.h>
 
 static struct smc91x_platdata smc91x_info = {
-    .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
+    .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
+             SMC91X_NOWAIT,
     .leda = RPC_LED_100_10,
     .ledb = RPC_LED_TX_RX,
 };

@@ -134,7 +134,8 @@ static struct platform_device net2272_bfin_device = {
 #include <linux/smc91x.h>
 
 static struct smc91x_platdata smc91x_info = {
-    .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
+    .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
+             SMC91X_NOWAIT,
     .leda = RPC_LED_100_10,
     .ledb = RPC_LED_TX_RX,
 };

@@ -164,7 +164,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
  */
 static inline unsigned long ___pa(unsigned long x)
 {
-    if (config_enabled(CONFIG_64BIT)) {
+    if (IS_ENABLED(CONFIG_64BIT)) {
         /*
          * For MIPS64 the virtual address may either be in one of
          * the compatibility segements ckseg0 or ckseg1, or it may

@@ -173,7 +173,7 @@ static inline unsigned long ___pa(unsigned long x)
         return x < CKSEG0 ? XPHYSADDR(x) : CPHYSADDR(x);
     }
 
-    if (!config_enabled(CONFIG_EVA)) {
+    if (!IS_ENABLED(CONFIG_EVA)) {
         /*
          * We're using the standard MIPS32 legacy memory map, ie.
          * the address x is going to be in kseg0 or kseg1. We can

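IS_ENABLED() is the <linux/kconfig.h> helper these hunks (and the s390 one
further below) switch to; it evaluates to 1 when the option is built in or
modular, and both branches remain visible to the compiler, so the dead one is
still type-checked before being optimized away. A short sketch (foo_pa() is a
hypothetical helper, not kernel code):

    #include <linux/kconfig.h>

    static unsigned long foo_pa(unsigned long x)
    {
            if (IS_ENABLED(CONFIG_64BIT))
                    return x;                /* compiled out on 32-bit */
            return x & 0xffffffffUL;
    }
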
@@ -40,7 +40,7 @@ static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
     srcu_idx = srcu_read_lock(&kvm->srcu);
     pfn = gfn_to_pfn(kvm, gfn);
 
-    if (is_error_pfn(pfn)) {
+    if (is_error_noslot_pfn(pfn)) {
         kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
         err = -EFAULT;
         goto out;

@@ -97,10 +97,10 @@
 #define ENOTCONN       235  /* Transport endpoint is not connected */
 #define ESHUTDOWN      236  /* Cannot send after transport endpoint shutdown */
 #define ETOOMANYREFS   237  /* Too many references: cannot splice */
-#define EREFUSED       ECONNREFUSED  /* for HP's NFS apparently */
 #define ETIMEDOUT      238  /* Connection timed out */
 #define ECONNREFUSED   239  /* Connection refused */
-#define EREMOTERELEASE 240  /* Remote peer released connection */
+#define EREFUSED       ECONNREFUSED  /* for HP's NFS apparently */
+#define EREMOTERELEASE 240  /* Remote peer released connection */
 #define EHOSTDOWN      241  /* Host is down */
 #define EHOSTUNREACH   242  /* No route to host */
 

@@ -51,8 +51,6 @@ EXPORT_SYMBOL(_parisc_requires_coherency);
 
 DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data);
 
-extern int update_cr16_clocksource(void);  /* from time.c */
-
 /*
 ** PARISC CPU driver - claim "device" and initialize CPU data structures.
 **

@@ -228,12 +226,6 @@ static int processor_probe(struct parisc_device *dev)
     }
 #endif
 
-    /* If we've registered more than one cpu,
-     * we'll use the jiffies clocksource since cr16
-     * is not synchronized between CPUs.
-     */
-    update_cr16_clocksource();
-
     return 0;
 }
 

@@ -221,18 +221,6 @@ static struct clocksource clocksource_cr16 = {
     .flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-int update_cr16_clocksource(void)
-{
-    /* since the cr16 cycle counters are not synchronized across CPUs,
-       we'll check if we should switch to a safe clocksource: */
-    if (clocksource_cr16.rating != 0 && num_online_cpus() > 1) {
-        clocksource_change_rating(&clocksource_cr16, 0);
-        return 1;
-    }
-
-    return 0;
-}
-
 void __init start_cpu_itimer(void)
 {
     unsigned int cpu = smp_processor_id();

@@ -3,6 +3,7 @@
 
 #ifndef __ASSEMBLY__
 #include <linux/cpumask.h>
+#include <asm/cpu_has_feature.h>
 
 /*
  * Mapping of threads to cores

@@ -21,7 +21,7 @@
 #ifndef __ASM_PPC64_HMI_H__
 #define __ASM_PPC64_HMI_H__
 
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 
 #define CORE_TB_RESYNC_REQ_BIT 63
 #define MAX_SUBCORE_PER_CORE   4

@@ -183,11 +183,6 @@ struct paca_struct {
      */
     u16 in_mce;
     u8 hmi_event_available;     /* HMI event is available */
-    /*
-     * Bitmap for sibling subcore status. See kvm/book3s_hv_ras.c for
-     * more details
-     */
-    struct sibling_subcore_state *sibling_subcore_state;
 #endif
 
     /* Stuff for accurate time accounting */

@@ -202,6 +197,13 @@ struct paca_struct {
     struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
 #endif
     struct kvmppc_host_state kvm_hstate;
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+    /*
+     * Bitmap for sibling subcore status. See kvm/book3s_hv_ras.c for
+     * more details
+     */
+    struct sibling_subcore_state *sibling_subcore_state;
+#endif
 #endif
 };
 

@@ -301,6 +301,7 @@ extern void pci_process_bridge_OF_ranges(struct pci_controller *hose,
 /* Allocate & free a PCI host bridge structure */
 extern struct pci_controller *pcibios_alloc_controller(struct device_node *dev);
 extern void pcibios_free_controller(struct pci_controller *phb);
+extern void pcibios_free_controller_deferred(struct pci_host_bridge *bridge);
 
 #ifdef CONFIG_PCI
 extern int pcibios_vaddr_is_ioport(void __iomem *address);

@@ -41,7 +41,7 @@ obj-$(CONFIG_VDSO32) += vdso32/
 obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
 obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o
 obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o
-obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o hmi.o
+obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o
 obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
 obj-$(CONFIG_PPC64) += vdso64/
 obj-$(CONFIG_ALTIVEC) += vecemu.o

@@ -368,13 +368,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 tabort_syscall:
     /* Firstly we need to enable TM in the kernel */
     mfmsr r10
-    li r13, 1
-    rldimi r10, r13, MSR_TM_LG, 63-MSR_TM_LG
+    li r9, 1
+    rldimi r10, r9, MSR_TM_LG, 63-MSR_TM_LG
     mtmsrd r10, 0
 
     /* tabort, this dooms the transaction, nothing else */
-    li r13, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
-    TABORT(R13)
+    li r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
+    TABORT(R9)
 
     /*
      * Return directly to userspace. We have corrupted user register state,

@@ -382,8 +382,8 @@ tabort_syscall:
      * resume after the tbegin of the aborted transaction with the
      * checkpointed register state.
      */
-    li r13, MSR_RI
-    andc r10, r10, r13
+    li r9, MSR_RI
+    andc r10, r10, r9
     mtmsrd r10, 1
     mtspr SPRN_SRR0, r11
     mtspr SPRN_SRR1, r12

@@ -485,7 +485,23 @@ machine_check_fwnmi:
     EXCEPTION_PROLOG_0(PACA_EXMC)
 machine_check_pSeries_0:
     EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST, 0x200)
-    EXCEPTION_PROLOG_PSERIES_1(machine_check_common, EXC_STD)
+    /*
+     * The following is essentially EXCEPTION_PROLOG_PSERIES_1 with the
+     * difference that MSR_RI is not enabled, because PACA_EXMC is being
+     * used, so nested machine check corrupts it. machine_check_common
+     * enables MSR_RI.
+     */
+    ld r12,PACAKBASE(r13)
+    ld r10,PACAKMSR(r13)
+    xori r10,r10,MSR_RI
+    mfspr r11,SPRN_SRR0
+    LOAD_HANDLER(r12, machine_check_common)
+    mtspr SPRN_SRR0,r12
+    mfspr r12,SPRN_SRR1
+    mtspr SPRN_SRR1,r10
+    rfid
+    b .    /* prevent speculative execution */
 
     KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
     KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
     KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)

@@ -969,14 +985,17 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
 machine_check_common:
 
     mfspr r10,SPRN_DAR
-    std r10,PACA_EXGEN+EX_DAR(r13)
+    std r10,PACA_EXMC+EX_DAR(r13)
     mfspr r10,SPRN_DSISR
-    stw r10,PACA_EXGEN+EX_DSISR(r13)
+    stw r10,PACA_EXMC+EX_DSISR(r13)
     EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
     FINISH_NAP
     RECONCILE_IRQ_STATE(r10, r11)
-    ld r3,PACA_EXGEN+EX_DAR(r13)
-    lwz r4,PACA_EXGEN+EX_DSISR(r13)
+    ld r3,PACA_EXMC+EX_DAR(r13)
+    lwz r4,PACA_EXMC+EX_DSISR(r13)
+    /* Enable MSR_RI when finished with PACA_EXMC */
+    li r10,MSR_RI
+    mtmsrd r10,1
     std r3,_DAR(r1)
     std r4,_DSISR(r1)
     bl save_nvgprs

@@ -29,7 +29,7 @@
 #include <linux/kprobes.h>
 #include <linux/ptrace.h>
 #include <linux/preempt.h>
-#include <linux/module.h>
+#include <linux/extable.h>
 #include <linux/kdebug.h>
 #include <linux/slab.h>
 #include <asm/code-patching.h>

@@ -153,6 +153,42 @@ void pcibios_free_controller(struct pci_controller *phb)
 }
 EXPORT_SYMBOL_GPL(pcibios_free_controller);
 
+/*
+ * This function is used to call pcibios_free_controller()
+ * in a deferred manner: a callback from the PCI subsystem.
+ *
+ * _*DO NOT*_ call pcibios_free_controller() explicitly if
+ * this is used (or it may access an invalid *phb pointer).
+ *
+ * The callback occurs when all references to the root bus
+ * are dropped (e.g., child buses/devices and their users).
+ *
+ * It's called as .release_fn() of 'struct pci_host_bridge'
+ * which is associated with the 'struct pci_controller.bus'
+ * (root bus) - it expects .release_data to hold a pointer
+ * to 'struct pci_controller'.
+ *
+ * In order to use it, register .release_fn()/release_data
+ * like this:
+ *
+ * pci_set_host_bridge_release(bridge,
+ *                             pcibios_free_controller_deferred,
+ *                             (void *) phb);
+ *
+ * e.g. in the pcibios_root_bridge_prepare() callback from
+ * pci_create_root_bus().
+ */
+void pcibios_free_controller_deferred(struct pci_host_bridge *bridge)
+{
+    struct pci_controller *phb = (struct pci_controller *)
+                                 bridge->release_data;
+
+    pr_debug("domain %d, dynamic %d\n", phb->global_number, phb->is_dynamic);
+
+    pcibios_free_controller(phb);
+}
+EXPORT_SYMBOL_GPL(pcibios_free_controller_deferred);
+
 /*
  * The function is used to return the minimal alignment
  * for memory or I/O windows of the associated P2P bridge.

@@ -695,7 +695,7 @@ unsigned char ibm_architecture_vec[] = {
     OV4_MIN_ENT_CAP,            /* minimum VP entitled capacity */
 
     /* option vector 5: PAPR/OF options */
-    VECTOR_LENGTH(18),          /* length */
+    VECTOR_LENGTH(21),          /* length */
     0,                          /* don't ignore, don't halt */
     OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
     OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |

@@ -726,8 +726,11 @@ unsigned char ibm_architecture_vec[] = {
     0,
     0,
     OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) |
-    OV5_FEAT(OV5_PFO_HW_842),
-    OV5_FEAT(OV5_SUB_PROCESSORS),
+    OV5_FEAT(OV5_PFO_HW_842),               /* Byte 17 */
+    0,                                      /* Byte 18 */
+    0,                                      /* Byte 19 */
+    0,                                      /* Byte 20 */
+    OV5_FEAT(OV5_SUB_PROCESSORS),           /* Byte 21 */
 
     /* option vector 6: IBM PAPR hints */
     VECTOR_LENGTH(3),           /* length */

@@ -1226,7 +1226,21 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
         (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
     if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
         goto bad;
+
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+    /*
+     * If there is a transactional state then throw it away.
+     * The purpose of a sigreturn is to destroy all traces of the
+     * signal frame, this includes any transactional state created
+     * within in. We only check for suspended as we can never be
+     * active in the kernel, we are active, there is nothing better to
+     * do than go ahead and Bad Thing later.
+     * The cause is not important as there will never be a
+     * recheckpoint so it's not user visible.
+     */
+    if (MSR_TM_SUSPENDED(mfmsr()))
+        tm_reclaim_current(0);
+
     if (__get_user(tmp, &rt_sf->uc.uc_link))
         goto bad;
     uc_transact = (struct ucontext __user *)(uintptr_t)tmp;

@@ -676,7 +676,21 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
     if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
         goto badframe;
     set_current_blocked(&set);
+
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+    /*
+     * If there is a transactional state then throw it away.
+     * The purpose of a sigreturn is to destroy all traces of the
+     * signal frame, this includes any transactional state created
+     * within in. We only check for suspended as we can never be
+     * active in the kernel, we are active, there is nothing better to
+     * do than go ahead and Bad Thing later.
+     * The cause is not important as there will never be a
+     * recheckpoint so it's not user visible.
+     */
+    if (MSR_TM_SUSPENDED(mfmsr()))
+        tm_reclaim_current(0);
+
     if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
         goto badframe;
     if (MSR_TM_ACTIVE(msr)) {

@@ -830,7 +830,7 @@ int __cpu_disable(void)
 
     /* Update sibling maps */
     base = cpu_first_thread_sibling(cpu);
-    for (i = 0; i < threads_per_core; i++) {
+    for (i = 0; i < threads_per_core && base + i < nr_cpu_ids; i++) {
         cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
         cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
         cpumask_clear_cpu(cpu, cpu_core_mask(base + i));

@@ -25,7 +25,8 @@
 #include <linux/user.h>
 #include <linux/interrupt.h>
 #include <linux/init.h>
-#include <linux/module.h>
+#include <linux/extable.h>
+#include <linux/module.h>   /* print_modules */
 #include <linux/prctl.h>
 #include <linux/delay.h>
 #include <linux/kprobes.h>

@@ -78,6 +78,7 @@ kvm-book3s_64-builtin-xics-objs-$(CONFIG_KVM_XICS) := \
 
 ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
+    book3s_hv_hmi.o \
     book3s_hv_rmhandlers.o \
     book3s_hv_rm_mmu.o \
     book3s_hv_ras.o \

@@ -26,7 +26,7 @@
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/highmem.h>
-#include <linux/module.h>
+#include <linux/extable.h>
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
 #include <linux/perf_event.h>

@@ -528,7 +528,6 @@ static struct platform_driver mpc512x_lpbfifo_driver = {
     .remove = mpc512x_lpbfifo_remove,
     .driver = {
         .name = DRV_NAME,
-        .owner = THIS_MODULE,
         .of_match_table = mpc512x_lpbfifo_match,
     },
 };

@@ -222,7 +222,6 @@ static const struct of_device_id mcu_of_match_table[] = {
 static struct i2c_driver mcu_driver = {
     .driver = {
         .name = "mcu-mpc8349emitx",
-        .owner = THIS_MODULE,
         .of_match_table = mcu_of_match_table,
     },
     .probe = mcu_probe,

@@ -26,7 +26,7 @@
 #include <linux/tty.h>
 #include <linux/serial_core.h>
 #include <linux/of_platform.h>
-#include <linux/module.h>
+#include <linux/extable.h>
 
 #include <asm/time.h>
 #include <asm/machdep.h>

@@ -23,7 +23,7 @@
 #include <linux/pci.h>
 #include <linux/kdev_t.h>
 #include <linux/console.h>
-#include <linux/module.h>
+#include <linux/extable.h>
 #include <linux/delay.h>
 #include <linux/irq.h>
 #include <linux/seq_file.h>

@@ -370,6 +370,7 @@ static irqreturn_t process_dump(int irq, void *data)
     uint32_t dump_id, dump_size, dump_type;
     struct dump_obj *dump;
     char name[22];
+    struct kobject *kobj;
 
     rc = dump_read_info(&dump_id, &dump_size, &dump_type);
     if (rc != OPAL_SUCCESS)

@@ -381,8 +382,12 @@ static irqreturn_t process_dump(int irq, void *data)
      * that gracefully and not create two conflicting
      * entries.
      */
-    if (kset_find_obj(dump_kset, name))
+    kobj = kset_find_obj(dump_kset, name);
+    if (kobj) {
+        /* Drop reference added by kset_find_obj() */
+        kobject_put(kobj);
         return 0;
+    }
 
     dump = create_dump_obj(dump_id, dump_size, dump_type);
     if (!dump)

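The pattern this fix (and the elog one just below) applies, sketched
generically: kset_find_obj() takes a reference on the object it returns (it
calls kobject_get() internally), so a caller that only needs an existence
check must drop that reference. foo_obj_exists() is a hypothetical example:

    #include <linux/kobject.h>

    static bool foo_obj_exists(struct kset *kset, const char *name)
    {
            struct kobject *kobj = kset_find_obj(kset, name);

            if (!kobj)
                    return false;
            kobject_put(kobj);  /* drop the reference kset_find_obj() took */
            return true;
    }
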
@@ -247,6 +247,7 @@ static irqreturn_t elog_event(int irq, void *data)
     uint64_t elog_type;
     int rc;
     char name[2+16+1];
+    struct kobject *kobj;
 
     rc = opal_get_elog_size(&id, &size, &type);
     if (rc != OPAL_SUCCESS) {

@@ -269,8 +270,12 @@ static irqreturn_t elog_event(int irq, void *data)
      * that gracefully and not create two conflicting
      * entries.
      */
-    if (kset_find_obj(elog_kset, name))
+    kobj = kset_find_obj(elog_kset, name);
+    if (kobj) {
+        /* Drop reference added by kset_find_obj() */
+        kobject_put(kobj);
         return IRQ_HANDLED;
+    }
 
     create_elog_obj(log_id, elog_size, elog_type);
 

@@ -149,7 +149,7 @@ static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no)
 
 static struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb)
 {
-    unsigned long pe = phb->ioda.total_pe_num - 1;
+    long pe;
 
     for (pe = phb->ioda.total_pe_num - 1; pe >= 0; pe--) {
         if (!test_and_set_bit(pe, phb->ioda.pe_alloc))

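The type change matters because a descending loop over an unsigned variable
can never terminate on "pe >= 0". A minimal stand-alone demonstration of the
difference:

    #include <stdio.h>

    int main(void)
    {
            long pe;  /* signed, as in the fix */

            for (pe = 3; pe >= 0; pe--)  /* visits 3, 2, 1, 0, then stops */
                    printf("%ld\n", pe);

            /* with "unsigned long pe", the test pe >= 0 is always true and
             * pe-- wraps from 0 to ULONG_MAX, so the loop never ends */
            return 0;
    }
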
@@ -119,6 +119,10 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
 
     bus = bridge->bus;
 
+    /* Rely on the pcibios_free_controller_deferred() callback. */
+    pci_set_host_bridge_release(bridge, pcibios_free_controller_deferred,
+                                (void *) pci_bus_to_host(bus));
+
     dn = pcibios_get_phb_of_node(bus);
     if (!dn)
         return 0;

@@ -106,8 +106,11 @@ int remove_phb_dynamic(struct pci_controller *phb)
         release_resource(res);
     }
 
-    /* Free pci_controller data structure */
-    pcibios_free_controller(phb);
+    /*
+     * The pci_controller data structure is freed by
+     * the pcibios_free_controller_deferred() callback;
+     * see pseries_root_bridge_prepare().
+     */
 
     return 0;
 }

@@ -534,7 +534,8 @@ struct cpm1_gpio16_chip {
 
 static void cpm1_gpio16_save_regs(struct of_mm_gpio_chip *mm_gc)
 {
-    struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
+    struct cpm1_gpio16_chip *cpm1_gc =
+        container_of(mm_gc, struct cpm1_gpio16_chip, mm_gc);
     struct cpm_ioport16 __iomem *iop = mm_gc->regs;
 
     cpm1_gc->cpdata = in_be16(&iop->dat);

@@ -649,7 +650,8 @@ struct cpm1_gpio32_chip {
 
 static void cpm1_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc)
 {
-    struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
+    struct cpm1_gpio32_chip *cpm1_gc =
+        container_of(mm_gc, struct cpm1_gpio32_chip, mm_gc);
     struct cpm_ioport32b __iomem *iop = mm_gc->regs;
 
     cpm1_gc->cpdata = in_be32(&iop->dat);

@@ -94,7 +94,8 @@ struct cpm2_gpio32_chip {
 
 static void cpm2_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc)
 {
-    struct cpm2_gpio32_chip *cpm2_gc = gpiochip_get_data(&mm_gc->gc);
+    struct cpm2_gpio32_chip *cpm2_gc =
+        container_of(mm_gc, struct cpm2_gpio32_chip, mm_gc);
     struct cpm2_ioports __iomem *iop = mm_gc->regs;
 
     cpm2_gc->cpdata = in_be32(&iop->dat);

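All three GPIO fixes above rely on the same idiom: container_of() recovers
the wrapper structure from a pointer to its embedded member, and unlike
gpiochip_get_data() it does not depend on the chip's private data having been
registered yet (these save_regs() callbacks can run before that happens). A
generic sketch, with foo_chip as a hypothetical wrapper:

    #include <linux/kernel.h>
    #include <linux/of_gpio.h>

    struct foo_chip {
            u32 cpdata;
            struct of_mm_gpio_chip mm_gc;  /* embedded member */
    };

    static struct foo_chip *to_foo_chip(struct of_mm_gpio_chip *mm_gc)
    {
            /* pointer arithmetic back from the member to the wrapper */
            return container_of(mm_gc, struct foo_chip, mm_gc);
    }
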
@@ -23,7 +23,7 @@
  */
 
 #include <linux/init.h>
-#include <linux/module.h>
+#include <linux/extable.h>
 #include <linux/types.h>
 #include <linux/dma-mapping.h>
 #include <linux/interrupt.h>

@@ -204,11 +204,9 @@ static void __init conmode_default(void)
 #endif
         }
     } else if (MACHINE_IS_KVM) {
-        if (sclp.has_vt220 &&
-            config_enabled(CONFIG_SCLP_VT220_CONSOLE))
+        if (sclp.has_vt220 && IS_ENABLED(CONFIG_SCLP_VT220_CONSOLE))
             SET_CONSOLE_VT220;
-        else if (sclp.has_linemode &&
-                 config_enabled(CONFIG_SCLP_CONSOLE))
+        else if (sclp.has_linemode && IS_ENABLED(CONFIG_SCLP_CONSOLE))
             SET_CONSOLE_SCLP;
         else
             SET_CONSOLE_HVC;

@@ -81,7 +81,7 @@
     .altinstr_replacement : { *(.altinstr_replacement) }
     /* .exit.text is discard at runtime, not link time, to deal with references
        from .altinstructions and .eh_frame */
-    .exit.text : { *(.exit.text) }
+    .exit.text : { EXIT_TEXT }
     .exit.data : { *(.exit.data) }
 
     .preinit_array : {

@@ -485,10 +485,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
 
     req = cast_mcryptd_ctx_to_req(req_ctx);
     if (irqs_disabled())
-        rctx->complete(&req->base, ret);
+        req_ctx->complete(&req->base, ret);
     else {
         local_bh_disable();
-        rctx->complete(&req->base, ret);
+        req_ctx->complete(&req->base, ret);
         local_bh_enable();
     }
 }

@@ -265,13 +265,14 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2)
     vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
     vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
     vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
-    movl _args_digest+4*32(state, idx, 4), tmp2_w
+    vmovd _args_digest(state , idx, 4) , %xmm0
     vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
     vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
     vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
 
-    vmovdqu %xmm0, _result_digest(job_rax)
-    movl tmp2_w, _result_digest+1*16(job_rax)
+    vmovdqu %xmm0, _result_digest(job_rax)
+    offset = (_result_digest + 1*16)
+    vmovdqu %xmm1, offset(job_rax)
 
     pop %rbx
 

@@ -497,10 +497,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
 
 	req = cast_mcryptd_ctx_to_req(req_ctx);
 	if (irqs_disabled())
-		rctx->complete(&req->base, ret);
+		req_ctx->complete(&req->base, ret);
 	else {
 		local_bh_disable();
-		rctx->complete(&req->base, ret);
+		req_ctx->complete(&req->base, ret);
 		local_bh_enable();
 	}
 }
@@ -1623,6 +1623,9 @@ void __init enable_IR_x2apic(void)
 	unsigned long flags;
 	int ret, ir_stat;
 
+	if (skip_ioapic_setup)
+		return;
+
 	ir_stat = irq_remapping_prepare();
 	if (ir_stat < 0 && !x2apic_supported())
 		return;
@@ -355,6 +355,7 @@ void load_ucode_amd_ap(void)
 	unsigned int cpu = smp_processor_id();
 	struct equiv_cpu_entry *eq;
 	struct microcode_amd *mc;
+	u8 *cont = container;
 	u32 rev, eax;
 	u16 eq_id;
 
@@ -371,8 +372,11 @@ void load_ucode_amd_ap(void)
 	if (check_current_patch_level(&rev, false))
 		return;
 
+	/* Add CONFIG_RANDOMIZE_MEMORY offset. */
+	cont += PAGE_OFFSET - __PAGE_OFFSET_BASE;
+
 	eax = cpuid_eax(0x00000001);
-	eq  = (struct equiv_cpu_entry *)(container + CONTAINER_HDR_SZ);
+	eq  = (struct equiv_cpu_entry *)(cont + CONTAINER_HDR_SZ);
 
 	eq_id = find_equiv_id(eq, eax);
 	if (!eq_id)
@@ -434,6 +438,9 @@ int __init save_microcode_in_initrd_amd(void)
 	else
 		container = cont_va;
 
+	/* Add CONFIG_RANDOMIZE_MEMORY offset. */
+	container += PAGE_OFFSET - __PAGE_OFFSET_BASE;
+
 	eax   = cpuid_eax(0x00000001);
 	eax   = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
@@ -100,10 +100,11 @@ EXPORT_PER_CPU_SYMBOL(cpu_info);
 /* Logical package management. We might want to allocate that dynamically */
 static int *physical_to_logical_pkg __read_mostly;
 static unsigned long *physical_package_map __read_mostly;;
-static unsigned long *logical_package_map  __read_mostly;
 static unsigned int max_physical_pkg_id __read_mostly;
 unsigned int __max_logical_packages __read_mostly;
 EXPORT_SYMBOL(__max_logical_packages);
+static unsigned int logical_packages __read_mostly;
+static bool logical_packages_frozen __read_mostly;
 
 /* Maximum number of SMT threads on any online core */
 int __max_smt_threads __read_mostly;
@@ -277,14 +278,14 @@ int topology_update_package_map(unsigned int apicid, unsigned int cpu)
 	if (test_and_set_bit(pkg, physical_package_map))
 		goto found;
 
-	new = find_first_zero_bit(logical_package_map, __max_logical_packages);
-	if (new >= __max_logical_packages) {
+	if (logical_packages_frozen) {
 		physical_to_logical_pkg[pkg] = -1;
-		pr_warn("APIC(%x) Package %u exceeds logical package map\n",
+		pr_warn("APIC(%x) Package %u exceeds logical package max\n",
 			apicid, pkg);
 		return -ENOSPC;
 	}
-	set_bit(new, logical_package_map);
+
+	new = logical_packages++;
 	pr_info("APIC(%x) Converting physical %u to logical package %u\n",
 		apicid, pkg, new);
 	physical_to_logical_pkg[pkg] = new;
@@ -341,6 +342,7 @@ static void __init smp_init_package_map(void)
 	}
 
 	__max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
+	logical_packages = 0;
 
 	/*
 	 * Possibly larger than what we need as the number of apic ids per
@@ -352,10 +354,6 @@ static void __init smp_init_package_map(void)
 	memset(physical_to_logical_pkg, 0xff, size);
 	size = BITS_TO_LONGS(max_physical_pkg_id) * sizeof(unsigned long);
 	physical_package_map = kzalloc(size, GFP_KERNEL);
-	size = BITS_TO_LONGS(__max_logical_packages) * sizeof(unsigned long);
-	logical_package_map = kzalloc(size, GFP_KERNEL);
-
-	pr_info("Max logical packages: %u\n", __max_logical_packages);
 
 	for_each_present_cpu(cpu) {
 		unsigned int apicid = apic->cpu_present_to_apicid(cpu);
@@ -369,6 +367,15 @@ static void __init smp_init_package_map(void)
 		set_cpu_possible(cpu, false);
 		set_cpu_present(cpu, false);
 	}
 
+	if (logical_packages > __max_logical_packages) {
+		pr_warn("Detected more packages (%u), then computed by BIOS data (%u).\n",
+			logical_packages, __max_logical_packages);
+		logical_packages_frozen = true;
+		__max_logical_packages = logical_packages;
+	}
+
+	pr_info("Max logical packages: %u\n", __max_logical_packages);
 }
 
 void __init smp_store_boot_cpu_info(void)
@@ -422,6 +422,7 @@ struct nested_vmx {
 	struct list_head vmcs02_pool;
 	int vmcs02_num;
 	u64 vmcs01_tsc_offset;
+	bool change_vmcs01_virtual_x2apic_mode;
 	/* L2 must run next, and mustn't decide to exit to L1. */
 	bool nested_run_pending;
 	/*
@@ -435,6 +436,8 @@ struct nested_vmx {
 	bool pi_pending;
 	u16 posted_intr_nv;
 
+	unsigned long *msr_bitmap;
+
 	struct hrtimer preemption_timer;
 	bool preemption_timer_expired;
 
@@ -924,7 +927,6 @@ static unsigned long *vmx_msr_bitmap_legacy;
 static unsigned long *vmx_msr_bitmap_longmode;
 static unsigned long *vmx_msr_bitmap_legacy_x2apic;
 static unsigned long *vmx_msr_bitmap_longmode_x2apic;
-static unsigned long *vmx_msr_bitmap_nested;
 static unsigned long *vmx_vmread_bitmap;
 static unsigned long *vmx_vmwrite_bitmap;
 
@@ -2198,6 +2200,12 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
 			new.control) != old.control);
 }
 
+static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
+{
+	vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
+	vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
+}
+
 /*
  * Switches to specified vcpu, until a matching vcpu_put(), but assumes
  * vcpu mutex is already taken.
@@ -2256,10 +2264,8 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	/* Setup TSC multiplier */
 	if (kvm_has_tsc_control &&
-	    vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) {
-		vmx->current_tsc_ratio = vcpu->arch.tsc_scaling_ratio;
-		vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
-	}
+	    vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
+		decache_tsc_multiplier(vmx);
 
 	vmx_vcpu_pi_load(vcpu, cpu);
 	vmx->host_pkru = read_pkru();
@@ -2508,7 +2514,7 @@ static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
 	unsigned long *msr_bitmap;
 
 	if (is_guest_mode(vcpu))
-		msr_bitmap = vmx_msr_bitmap_nested;
+		msr_bitmap = to_vmx(vcpu)->nested.msr_bitmap;
 	else if (cpu_has_secondary_exec_ctrls() &&
 		 (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) &
 		  SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
@@ -6363,13 +6369,6 @@ static __init int hardware_setup(void)
 	if (!vmx_msr_bitmap_longmode_x2apic)
 		goto out4;
 
-	if (nested) {
-		vmx_msr_bitmap_nested =
-			(unsigned long *)__get_free_page(GFP_KERNEL);
-		if (!vmx_msr_bitmap_nested)
-			goto out5;
-	}
-
 	vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
 	if (!vmx_vmread_bitmap)
 		goto out6;
@@ -6392,8 +6391,6 @@ static __init int hardware_setup(void)
 
 	memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
 	memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
-	if (nested)
-		memset(vmx_msr_bitmap_nested, 0xff, PAGE_SIZE);
 
 	if (setup_vmcs_config(&vmcs_config) < 0) {
 		r = -EIO;
@@ -6529,9 +6526,6 @@ out8:
 out7:
 	free_page((unsigned long)vmx_vmread_bitmap);
 out6:
-	if (nested)
-		free_page((unsigned long)vmx_msr_bitmap_nested);
-out5:
 	free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
 out4:
 	free_page((unsigned long)vmx_msr_bitmap_longmode);
@@ -6557,8 +6551,6 @@ static __exit void hardware_unsetup(void)
 	free_page((unsigned long)vmx_io_bitmap_a);
 	free_page((unsigned long)vmx_vmwrite_bitmap);
 	free_page((unsigned long)vmx_vmread_bitmap);
-	if (nested)
-		free_page((unsigned long)vmx_msr_bitmap_nested);
 
 	free_kvm_area();
 }
@@ -6995,16 +6987,21 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 		return 1;
 	}
 
+	if (cpu_has_vmx_msr_bitmap()) {
+		vmx->nested.msr_bitmap =
+				(unsigned long *)__get_free_page(GFP_KERNEL);
+		if (!vmx->nested.msr_bitmap)
+			goto out_msr_bitmap;
+	}
+
 	vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
 	if (!vmx->nested.cached_vmcs12)
-		return -ENOMEM;
+		goto out_cached_vmcs12;
 
 	if (enable_shadow_vmcs) {
 		shadow_vmcs = alloc_vmcs();
-		if (!shadow_vmcs) {
-			kfree(vmx->nested.cached_vmcs12);
-			return -ENOMEM;
-		}
+		if (!shadow_vmcs)
+			goto out_shadow_vmcs;
 		/* mark vmcs as shadow */
 		shadow_vmcs->revision_id |= (1u << 31);
 		/* init shadow vmcs */
@@ -7024,6 +7021,15 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 	skip_emulated_instruction(vcpu);
 	nested_vmx_succeed(vcpu);
 	return 1;
+
+out_shadow_vmcs:
+	kfree(vmx->nested.cached_vmcs12);
+
+out_cached_vmcs12:
+	free_page((unsigned long)vmx->nested.msr_bitmap);
+
+out_msr_bitmap:
+	return -ENOMEM;
 }
 
 /*
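
handle_vmon() now unwinds its allocations through a chain of labels in reverse order, the standard kernel error-handling idiom: each label frees everything allocated before the step that failed, and control falls through the remaining labels. A runnable sketch of the same structure, with plain malloc/free standing in for the page and vmcs allocations:

    #include <stdio.h>
    #include <stdlib.h>

    /* Sketch of the reverse-order goto unwind used in handle_vmon() above;
     * the three buffers stand in for msr_bitmap, cached_vmcs12, shadow_vmcs. */
    static int setup(void)
    {
            char *msr_bitmap, *cached_vmcs12, *shadow_vmcs;

            msr_bitmap = malloc(4096);
            if (!msr_bitmap)
                    goto out_msr_bitmap;

            cached_vmcs12 = malloc(4096);
            if (!cached_vmcs12)
                    goto out_cached_vmcs12;

            shadow_vmcs = malloc(4096);
            if (!shadow_vmcs)
                    goto out_shadow_vmcs;

            printf("all allocations succeeded\n");
            free(shadow_vmcs);
            free(cached_vmcs12);
            free(msr_bitmap);
            return 0;

    out_shadow_vmcs:                /* third step failed: undo the second */
            free(cached_vmcs12);
    out_cached_vmcs12:              /* second step failed: undo the first */
            free(msr_bitmap);
    out_msr_bitmap:                 /* first step failed: nothing to undo */
            return -1;              /* stands in for -ENOMEM */
    }

    int main(void)
    {
            return setup();
    }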
@@ -7098,6 +7104,10 @@ static void free_nested(struct vcpu_vmx *vmx)
 	vmx->nested.vmxon = false;
 	free_vpid(vmx->nested.vpid02);
 	nested_release_vmcs12(vmx);
+	if (vmx->nested.msr_bitmap) {
+		free_page((unsigned long)vmx->nested.msr_bitmap);
+		vmx->nested.msr_bitmap = NULL;
+	}
 	if (enable_shadow_vmcs)
 		free_vmcs(vmx->nested.current_shadow_vmcs);
 	kfree(vmx->nested.cached_vmcs12);
@@ -8419,6 +8429,12 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
 {
 	u32 sec_exec_control;
 
+	/* Postpone execution until vmcs01 is the current VMCS. */
+	if (is_guest_mode(vcpu)) {
+		to_vmx(vcpu)->nested.change_vmcs01_virtual_x2apic_mode = true;
+		return;
+	}
+
 	/*
 	 * There is not point to enable virtualize x2apic without enable
 	 * apicv
@@ -9472,8 +9488,10 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
 {
 	int msr;
 	struct page *page;
-	unsigned long *msr_bitmap;
+	unsigned long *msr_bitmap_l1;
+	unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.msr_bitmap;
 
 	/* This shortcut is ok because we support only x2APIC MSRs so far. */
 	if (!nested_cpu_has_virt_x2apic_mode(vmcs12))
 		return false;
@@ -9482,63 +9500,37 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
 		WARN_ON(1);
 		return false;
 	}
-	msr_bitmap = (unsigned long *)kmap(page);
-	if (!msr_bitmap) {
+	msr_bitmap_l1 = (unsigned long *)kmap(page);
+	if (!msr_bitmap_l1) {
 		nested_release_page_clean(page);
 		WARN_ON(1);
 		return false;
 	}
 
+	memset(msr_bitmap_l0, 0xff, PAGE_SIZE);
+
 	if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
 		if (nested_cpu_has_apic_reg_virt(vmcs12))
 			for (msr = 0x800; msr <= 0x8ff; msr++)
 				nested_vmx_disable_intercept_for_msr(
-					msr_bitmap,
-					vmx_msr_bitmap_nested,
+					msr_bitmap_l1, msr_bitmap_l0,
 					msr, MSR_TYPE_R);
-		/* TPR is allowed */
-		nested_vmx_disable_intercept_for_msr(msr_bitmap,
-				vmx_msr_bitmap_nested,
+
+		nested_vmx_disable_intercept_for_msr(
+				msr_bitmap_l1, msr_bitmap_l0,
 				APIC_BASE_MSR + (APIC_TASKPRI >> 4),
 				MSR_TYPE_R | MSR_TYPE_W);
+
 		if (nested_cpu_has_vid(vmcs12)) {
-			/* EOI and self-IPI are allowed */
 			nested_vmx_disable_intercept_for_msr(
-				msr_bitmap,
-				vmx_msr_bitmap_nested,
+				msr_bitmap_l1, msr_bitmap_l0,
 				APIC_BASE_MSR + (APIC_EOI >> 4),
 				MSR_TYPE_W);
 			nested_vmx_disable_intercept_for_msr(
-				msr_bitmap,
-				vmx_msr_bitmap_nested,
+				msr_bitmap_l1, msr_bitmap_l0,
 				APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
 				MSR_TYPE_W);
 		}
-	} else {
-		/*
-		 * Enable reading intercept of all the x2apic
-		 * MSRs. We should not rely on vmcs12 to do any
-		 * optimizations here, it may have been modified
-		 * by L1.
-		 */
-		for (msr = 0x800; msr <= 0x8ff; msr++)
-			__vmx_enable_intercept_for_msr(
-					vmx_msr_bitmap_nested,
-					msr,
-					MSR_TYPE_R);
-
-		__vmx_enable_intercept_for_msr(
-				vmx_msr_bitmap_nested,
-				APIC_BASE_MSR + (APIC_TASKPRI >> 4),
-				MSR_TYPE_W);
-		__vmx_enable_intercept_for_msr(
-				vmx_msr_bitmap_nested,
-				APIC_BASE_MSR + (APIC_EOI >> 4),
-				MSR_TYPE_W);
-		__vmx_enable_intercept_for_msr(
-				vmx_msr_bitmap_nested,
-				APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
-				MSR_TYPE_W);
-	}
 	kunmap(page);
 	nested_release_page_clean(page);
@@ -9957,10 +9949,10 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 	}
 
 	if (cpu_has_vmx_msr_bitmap() &&
-	    exec_control & CPU_BASED_USE_MSR_BITMAPS) {
-		nested_vmx_merge_msr_bitmap(vcpu, vmcs12);
-		/* MSR_BITMAP will be set by following vmx_set_efer. */
-	} else
+	    exec_control & CPU_BASED_USE_MSR_BITMAPS &&
+	    nested_vmx_merge_msr_bitmap(vcpu, vmcs12))
+		; /* MSR_BITMAP will be set by following vmx_set_efer. */
+	else
 		exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
 
 	/*
@@ -10011,6 +10003,8 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 			vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);
 	else
 		vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
+	if (kvm_has_tsc_control)
+		decache_tsc_multiplier(vmx);
 
 	if (enable_vpid) {
 		/*
@@ -10767,6 +10761,14 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 	else
 		vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
 			      PIN_BASED_VMX_PREEMPTION_TIMER);
+	if (kvm_has_tsc_control)
+		decache_tsc_multiplier(vmx);
+
+	if (vmx->nested.change_vmcs01_virtual_x2apic_mode) {
+		vmx->nested.change_vmcs01_virtual_x2apic_mode = false;
+		vmx_set_virtual_x2apic_mode(vcpu,
+				vcpu->arch.apic_base & X2APIC_ENABLE);
+	}
 
 	/* This is needed for same reason as it was needed in prepare_vmcs02 */
 	vmx->host_rsp = 0;
@@ -77,7 +77,7 @@ static inline unsigned long get_padding(struct kaslr_memory_region *region)
  */
 static inline bool kaslr_memory_enabled(void)
 {
-	return kaslr_enabled() && !config_enabled(CONFIG_KASAN);
+	return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
 }
 
 /* Initialize base and padding for each memory region randomized with KASLR */
@@ -41,6 +41,7 @@ static DEFINE_RAW_SPINLOCK(list_lock);
  * @node:	list item for parent traversal.
  * @rcu:	RCU callback item for freeing.
  * @irq:	back pointer to parent.
+ * @enabled:	true if driver enabled IRQ
 * @virq:	the virtual IRQ value provided to the requesting driver.
 *
 * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to
@@ -50,6 +51,7 @@ struct vmd_irq {
 	struct list_head	node;
 	struct rcu_head		rcu;
 	struct vmd_irq_list	*irq;
+	bool			enabled;
 	unsigned int		virq;
 };
 
@@ -122,7 +124,9 @@ static void vmd_irq_enable(struct irq_data *data)
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&list_lock, flags);
+	WARN_ON(vmdirq->enabled);
 	list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
+	vmdirq->enabled = true;
 	raw_spin_unlock_irqrestore(&list_lock, flags);
 
 	data->chip->irq_unmask(data);
@@ -136,8 +140,10 @@ static void vmd_irq_disable(struct irq_data *data)
 	data->chip->irq_mask(data);
 
 	raw_spin_lock_irqsave(&list_lock, flags);
-	list_del_rcu(&vmdirq->node);
-	INIT_LIST_HEAD_RCU(&vmdirq->node);
+	if (vmdirq->enabled) {
+		list_del_rcu(&vmdirq->node);
+		vmdirq->enabled = false;
+	}
 	raw_spin_unlock_irqrestore(&list_lock, flags);
 }
 
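
The vmd fix guards list membership with an explicit flag, so a disable without a matching enable no longer unlinks a node that was never added. A standalone sketch of the guard; this is simplified, with an int standing in for the RCU list membership:

    #include <stdbool.h>
    #include <stdio.h>

    /* Sketch of the enable/disable guard added to vmd_irq above: disable may
     * run more than once (or before enable), so track membership explicitly. */
    struct item {
            bool enabled;
            int in_list;    /* stands in for being linked on the RCU list */
    };

    static void item_enable(struct item *it)
    {
            if (it->enabled)        /* WARN_ON(vmdirq->enabled) in the driver */
                    return;
            it->in_list = 1;        /* list_add_tail_rcu() */
            it->enabled = true;
    }

    static void item_disable(struct item *it)
    {
            if (it->enabled) {      /* only unlink what was actually linked */
                    it->in_list = 0;        /* list_del_rcu() */
                    it->enabled = false;
            }
    }

    int main(void)
    {
            struct item it = { 0 };

            item_disable(&it);      /* no-op instead of corrupting the list */
            item_enable(&it);
            item_disable(&it);
            printf("in_list=%d\n", it.in_list);
            return 0;
    }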
@@ -113,7 +113,7 @@ static int set_up_temporary_mappings(void)
 			return result;
 	}
 
-	temp_level4_pgt = (unsigned long)pgd - __PAGE_OFFSET;
+	temp_level4_pgt = __pa(pgd);
 	return 0;
 }
 
@@ -118,7 +118,7 @@ DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
 DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
 
 /* Linux <-> Xen vCPU id mapping */
-DEFINE_PER_CPU(int, xen_vcpu_id) = -1;
+DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
 EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
 
 enum xen_domain_type xen_domain_type = XEN_NATIVE;
block/bio.c (21 changed lines)
@@ -667,18 +667,19 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
 	bio->bi_iter.bi_sector	= bio_src->bi_iter.bi_sector;
 	bio->bi_iter.bi_size	= bio_src->bi_iter.bi_size;
 
-	if (bio_op(bio) == REQ_OP_DISCARD)
-		goto integrity_clone;
-
-	if (bio_op(bio) == REQ_OP_WRITE_SAME) {
+	switch (bio_op(bio)) {
+	case REQ_OP_DISCARD:
+	case REQ_OP_SECURE_ERASE:
+		break;
+	case REQ_OP_WRITE_SAME:
 		bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
-		goto integrity_clone;
+		break;
+	default:
+		bio_for_each_segment(bv, bio_src, iter)
+			bio->bi_io_vec[bio->bi_vcnt++] = bv;
+		break;
 	}
 
-	bio_for_each_segment(bv, bio_src, iter)
-		bio->bi_io_vec[bio->bi_vcnt++] = bv;
-
-integrity_clone:
 	if (bio_integrity(bio_src)) {
 		int ret;
@@ -1788,7 +1789,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
 	 * Discards need a mutable bio_vec to accommodate the payload
 	 * required by the DSM TRIM and UNMAP commands.
 	 */
-	if (bio_op(bio) == REQ_OP_DISCARD)
+	if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
 		split = bio_clone_bioset(bio, gfp, bs);
 	else
 		split = bio_clone_fast(bio, gfp, bs);
@@ -515,7 +515,9 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
 
 void blk_set_queue_dying(struct request_queue *q)
 {
-	queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+	spin_lock_irq(q->queue_lock);
+	queue_flag_set(QUEUE_FLAG_DYING, q);
+	spin_unlock_irq(q->queue_lock);
 
 	if (q->mq_ops)
 		blk_mq_wake_waiters(q);
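
Queue flags are updated with non-atomic bitops, so blk_set_queue_dying() has to take queue_lock like every other writer, or a concurrent flag update can be lost. A user-space sketch of the rule, with a pthread mutex standing in for q->queue_lock:

    #include <pthread.h>
    #include <stdio.h>

    /* Sketch of the locking rule behind the blk_set_queue_dying() change:
     * the flag word is modified with a plain read-modify-write, so all
     * writers must serialize on the same lock. */
    static unsigned long queue_flags;
    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

    #define QUEUE_FLAG_DYING 0

    static void queue_flag_set(int flag)
    {
            queue_flags |= 1UL << flag;     /* non-atomic, like __set_bit() */
    }

    static void blk_set_queue_dying(void)
    {
            pthread_mutex_lock(&queue_lock);
            queue_flag_set(QUEUE_FLAG_DYING);
            pthread_mutex_unlock(&queue_lock);
    }

    int main(void)
    {
            blk_set_queue_dying();
            printf("flags=%lx\n", queue_flags);
            return 0;
    }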
@@ -94,8 +94,30 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 	bool do_split = true;
 	struct bio *new = NULL;
 	const unsigned max_sectors = get_max_io_size(q, bio);
+	unsigned bvecs = 0;
 
 	bio_for_each_segment(bv, bio, iter) {
+		/*
+		 * With arbitrary bio size, the incoming bio may be very
+		 * big. We have to split the bio into small bios so that
+		 * each holds at most BIO_MAX_PAGES bvecs because
+		 * bio_clone() can fail to allocate big bvecs.
+		 *
+		 * It should have been better to apply the limit per
+		 * request queue in which bio_clone() is involved,
+		 * instead of globally. The biggest blocker is the
+		 * bio_clone() in bio bounce.
+		 *
+		 * If bio is splitted by this reason, we should have
+		 * allowed to continue bios merging, but don't do
+		 * that now for making the change simple.
+		 *
+		 * TODO: deal with bio bounce's bio_clone() gracefully
+		 * and convert the global limit into per-queue limit.
+		 */
+		if (bvecs++ >= BIO_MAX_PAGES)
+			goto split;
+
 		/*
 		 * If the queue doesn't support SG gaps and adding this
 		 * offset would create a gap, disallow it.
@@ -172,12 +194,18 @@ void blk_queue_split(struct request_queue *q, struct bio **bio,
 	struct bio *split, *res;
 	unsigned nsegs;
 
-	if (bio_op(*bio) == REQ_OP_DISCARD)
+	switch (bio_op(*bio)) {
+	case REQ_OP_DISCARD:
+	case REQ_OP_SECURE_ERASE:
 		split = blk_bio_discard_split(q, *bio, bs, &nsegs);
-	else if (bio_op(*bio) == REQ_OP_WRITE_SAME)
+		break;
+	case REQ_OP_WRITE_SAME:
 		split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
-	else
+		break;
+	default:
 		split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
+		break;
+	}
 
 	/* physical segments can be figured out during splitting */
 	res = split ? split : *bio;
@@ -213,7 +241,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 	 * This should probably be returning 0, but blk_add_request_payload()
 	 * (Christoph!!!!)
 	 */
-	if (bio_op(bio) == REQ_OP_DISCARD)
+	if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
 		return 1;
 
 	if (bio_op(bio) == REQ_OP_WRITE_SAME)
@@ -385,7 +413,9 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
 	nsegs = 0;
 	cluster = blk_queue_cluster(q);
 
-	if (bio_op(bio) == REQ_OP_DISCARD) {
+	switch (bio_op(bio)) {
+	case REQ_OP_DISCARD:
+	case REQ_OP_SECURE_ERASE:
 		/*
 		 * This is a hack - drivers should be neither modifying the
 		 * biovec, nor relying on bi_vcnt - but because of
@@ -393,19 +423,16 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
 		 * a payload we need to set up here (thank you Christoph) and
 		 * bi_vcnt is really the only way of telling if we need to.
 		 */
-
-		if (bio->bi_vcnt)
-			goto single_segment;
-
-		return 0;
-	}
-
-	if (bio_op(bio) == REQ_OP_WRITE_SAME) {
-single_segment:
+		if (!bio->bi_vcnt)
+			return 0;
+		/* Fall through */
+	case REQ_OP_WRITE_SAME:
 		*sg = sglist;
 		bvec = bio_iovec(bio);
 		sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
 		return 1;
+	default:
+		break;
 	}
 
 	for_each_bio(bio)
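
The rewrite above replaces an if/goto chain with one switch over bio_op(), deliberately falling through from the discard cases into the single-segment path shared with REQ_OP_WRITE_SAME. A compact sketch of that dispatch shape; the enum is illustrative and payload stands in for bio->bi_vcnt:

    #include <stdio.h>

    enum req_op { OP_DISCARD, OP_SECURE_ERASE, OP_WRITE_SAME, OP_READ };

    /* Sketch of the single-switch dispatch with a deliberate fall through,
     * mirroring __blk_bios_map_sg() above. */
    static int map_single_segment(enum req_op op, int payload)
    {
            switch (op) {
            case OP_DISCARD:
            case OP_SECURE_ERASE:
                    if (!payload)
                            return 0;       /* no biovec to map */
                    /* Fall through */
            case OP_WRITE_SAME:
                    return 1;               /* exactly one segment */
            default:
                    return -1;              /* caller walks every segment */
            }
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   map_single_segment(OP_SECURE_ERASE, 0), /* 0 */
                   map_single_segment(OP_DISCARD, 1),      /* 1 */
                   map_single_segment(OP_READ, 1));        /* -1 */
            return 0;
    }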
@@ -793,11 +793,12 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 	struct list_head *dptr;
 	int queued;
 
-	WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));
-
 	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
 		return;
 
+	WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
+		cpu_online(hctx->next_cpu));
+
 	hctx->run++;
 
 	/*
@@ -1036,10 +1037,11 @@ void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
 EXPORT_SYMBOL(blk_mq_delay_queue);
 
 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
-					    struct blk_mq_ctx *ctx,
 					    struct request *rq,
 					    bool at_head)
 {
+	struct blk_mq_ctx *ctx = rq->mq_ctx;
+
 	trace_block_rq_insert(hctx->queue, rq);
 
 	if (at_head)
@@ -1053,20 +1055,16 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
 {
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
 
-	__blk_mq_insert_req_list(hctx, ctx, rq, at_head);
+	__blk_mq_insert_req_list(hctx, rq, at_head);
 	blk_mq_hctx_mark_pending(hctx, ctx);
 }
 
 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
 		bool async)
 {
+	struct blk_mq_ctx *ctx = rq->mq_ctx;
 	struct request_queue *q = rq->q;
 	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
-
-	current_ctx = blk_mq_get_ctx(q);
-	if (!cpu_online(ctx->cpu))
-		rq->mq_ctx = ctx = current_ctx;
 
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
@@ -1076,8 +1074,6 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
 
 	if (run_queue)
 		blk_mq_run_hw_queue(hctx, async);
-
-	blk_mq_put_ctx(current_ctx);
 }
 
 static void blk_mq_insert_requests(struct request_queue *q,
@@ -1088,14 +1084,9 @@ static void blk_mq_insert_requests(struct request_queue *q,
 
 {
 	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *current_ctx;
 
 	trace_block_unplug(q, depth, !from_schedule);
 
-	current_ctx = blk_mq_get_ctx(q);
-
-	if (!cpu_online(ctx->cpu))
-		ctx = current_ctx;
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
 	/*
@@ -1107,15 +1098,14 @@ static void blk_mq_insert_requests(struct request_queue *q,
 		struct request *rq;
 
 		rq = list_first_entry(list, struct request, queuelist);
+		BUG_ON(rq->mq_ctx != ctx);
 		list_del_init(&rq->queuelist);
-		rq->mq_ctx = ctx;
-		__blk_mq_insert_req_list(hctx, ctx, rq, false);
+		__blk_mq_insert_req_list(hctx, rq, false);
 	}
 	blk_mq_hctx_mark_pending(hctx, ctx);
 	spin_unlock(&ctx->lock);
 
 	blk_mq_run_hw_queue(hctx, from_schedule);
-	blk_mq_put_ctx(current_ctx);
 }
 
 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
@@ -1630,16 +1620,17 @@ static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
 	return 0;
 }
 
+/*
+ * 'cpu' is going away. splice any existing rq_list entries from this
+ * software queue to the hw queue dispatch list, and ensure that it
+ * gets run.
+ */
 static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
 {
-	struct request_queue *q = hctx->queue;
 	struct blk_mq_ctx *ctx;
 	LIST_HEAD(tmp);
 
-	/*
-	 * Move ctx entries to new CPU, if this one is going away.
-	 */
-	ctx = __blk_mq_get_ctx(q, cpu);
+	ctx = __blk_mq_get_ctx(hctx->queue, cpu);
 
 	spin_lock(&ctx->lock);
 	if (!list_empty(&ctx->rq_list)) {
@@ -1651,24 +1642,11 @@ static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
 	if (list_empty(&tmp))
 		return NOTIFY_OK;
 
-	ctx = blk_mq_get_ctx(q);
-	spin_lock(&ctx->lock);
-
-	while (!list_empty(&tmp)) {
-		struct request *rq;
-
-		rq = list_first_entry(&tmp, struct request, queuelist);
-		rq->mq_ctx = ctx;
-		list_move_tail(&rq->queuelist, &ctx->rq_list);
-	}
-
-	hctx = q->mq_ops->map_queue(q, ctx->cpu);
-	blk_mq_hctx_mark_pending(hctx, ctx);
-
-	spin_unlock(&ctx->lock);
+	spin_lock(&hctx->lock);
+	list_splice_tail_init(&tmp, &hctx->dispatch);
+	spin_unlock(&hctx->lock);
 
 	blk_mq_run_hw_queue(hctx, true);
-	blk_mq_put_ctx(ctx);
 	return NOTIFY_OK;
 }
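
Instead of re-tagging each request onto a surviving software queue, the offline handler now splices the whole pending list onto the hardware queue's dispatch list in one constant-time operation. A self-contained sketch of such a tail splice, using a simplified singly-linked list rather than the kernel's list.h:

    #include <stdio.h>

    /* Sketch of the splice in blk_mq_hctx_cpu_offline() above: move an entire
     * pending list onto the dispatch list in O(1) instead of per-request moves. */
    struct node { int id; struct node *next; };

    struct list { struct node *head, *tail; };

    static void list_splice_tail(struct list *from, struct list *to)
    {
            if (!from->head)
                    return;
            if (to->tail)
                    to->tail->next = from->head;
            else
                    to->head = from->head;
            to->tail = from->tail;
            from->head = from->tail = NULL;     /* like the _init variant */
    }

    int main(void)
    {
            struct node a = { 1, NULL }, b = { 2, NULL };
            struct list ctx_rq_list = { &a, &b }, dispatch = { NULL, NULL };
            struct node *n;

            a.next = &b;
            list_splice_tail(&ctx_rq_list, &dispatch);  /* one pointer fixup */
            for (n = dispatch.head; n; n = n->next)
                    printf("rq %d\n", n->id);
            return 0;
    }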
@@ -366,7 +366,7 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 	list_for_each_prev(entry, &q->queue_head) {
 		struct request *pos = list_entry_rq(entry);
 
-		if ((req_op(rq) == REQ_OP_DISCARD) != (req_op(pos) == REQ_OP_DISCARD))
+		if (req_op(rq) != req_op(pos))
 			break;
 		if (rq_data_dir(rq) != rq_data_dir(pos))
 			break;
@@ -3706,22 +3706,21 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
 	if (UFDCS->rawcmd == 1)
 		UFDCS->rawcmd = 2;
 
-	if (mode & (FMODE_READ|FMODE_WRITE)) {
-		UDRS->last_checked = 0;
-		clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
-		check_disk_change(bdev);
-		if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
-			goto out;
-		if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
-			goto out;
-	}
-
-	res = -EROFS;
-
-	if ((mode & FMODE_WRITE) &&
-	    !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
-		goto out;
-
+	if (!(mode & FMODE_NDELAY)) {
+		if (mode & (FMODE_READ|FMODE_WRITE)) {
+			UDRS->last_checked = 0;
+			clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
+			check_disk_change(bdev);
+			if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
+				goto out;
+			if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
+				goto out;
+		}
+		res = -EROFS;
+		if ((mode & FMODE_WRITE) &&
+		    !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
+			goto out;
+	}
 	mutex_unlock(&open_lock);
 	mutex_unlock(&floppy_mutex);
 	return 0;
@@ -189,6 +189,8 @@ struct blkfront_info
 	struct mutex mutex;
 	struct xenbus_device *xbdev;
 	struct gendisk *gd;
+	u16 sector_size;
+	unsigned int physical_sector_size;
 	int vdevice;
 	blkif_vdev_t handle;
 	enum blkif_state connected;
@@ -910,9 +912,45 @@ static struct blk_mq_ops blkfront_mq_ops = {
 	.map_queue = blk_mq_map_queue,
 };
 
+static void blkif_set_queue_limits(struct blkfront_info *info)
+{
+	struct request_queue *rq = info->rq;
+	struct gendisk *gd = info->gd;
+	unsigned int segments = info->max_indirect_segments ? :
+				BLKIF_MAX_SEGMENTS_PER_REQUEST;
+
+	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
+
+	if (info->feature_discard) {
+		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
+		blk_queue_max_discard_sectors(rq, get_capacity(gd));
+		rq->limits.discard_granularity = info->discard_granularity;
+		rq->limits.discard_alignment = info->discard_alignment;
+		if (info->feature_secdiscard)
+			queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
+	}
+
+	/* Hard sector size and max sectors impersonate the equiv. hardware. */
+	blk_queue_logical_block_size(rq, info->sector_size);
+	blk_queue_physical_block_size(rq, info->physical_sector_size);
+	blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
+
+	/* Each segment in a request is up to an aligned page in size. */
+	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
+	blk_queue_max_segment_size(rq, PAGE_SIZE);
+
+	/* Ensure a merged request will fit in a single I/O ring slot. */
+	blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
+
+	/* Make sure buffer addresses are sector-aligned. */
+	blk_queue_dma_alignment(rq, 511);
+
+	/* Make sure we don't use bounce buffers. */
+	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
+}
+
 static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
-				unsigned int physical_sector_size,
-				unsigned int segments)
+				unsigned int physical_sector_size)
 {
 	struct request_queue *rq;
 	struct blkfront_info *info = gd->private_data;
@@ -944,36 +982,11 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
 	}
 
 	rq->queuedata = info;
-	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
-
-	if (info->feature_discard) {
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
-		blk_queue_max_discard_sectors(rq, get_capacity(gd));
-		rq->limits.discard_granularity = info->discard_granularity;
-		rq->limits.discard_alignment = info->discard_alignment;
-		if (info->feature_secdiscard)
-			queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
-	}
-
-	/* Hard sector size and max sectors impersonate the equiv. hardware. */
-	blk_queue_logical_block_size(rq, sector_size);
-	blk_queue_physical_block_size(rq, physical_sector_size);
-	blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
-
-	/* Each segment in a request is up to an aligned page in size. */
-	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
-	blk_queue_max_segment_size(rq, PAGE_SIZE);
-
-	/* Ensure a merged request will fit in a single I/O ring slot. */
-	blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
-
-	/* Make sure buffer addresses are sector-aligned. */
-	blk_queue_dma_alignment(rq, 511);
-
-	/* Make sure we don't use bounce buffers. */
-	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
-
-	gd->queue = rq;
+	info->rq = gd->queue = rq;
+	info->gd = gd;
+	info->sector_size = sector_size;
+	info->physical_sector_size = physical_sector_size;
+	blkif_set_queue_limits(info);
 
 	return 0;
 }
@@ -1136,16 +1149,11 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
 	gd->private_data = info;
 	set_capacity(gd, capacity);
 
-	if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size,
-				 info->max_indirect_segments ? :
-				 BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
+	if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size)) {
 		del_gendisk(gd);
 		goto release;
 	}
 
-	info->rq = gd->queue;
-	info->gd = gd;
-
 	xlvbd_flush(info);
 
 	if (vdisk_info & VDISK_READONLY)
@@ -1315,7 +1323,7 @@ free_shadow:
 			rinfo->ring_ref[i] = GRANT_INVALID_REF;
 		}
 	}
-	free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * PAGE_SIZE));
+	free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE));
 	rinfo->ring.sring = NULL;
 
 	if (rinfo->irq)
@@ -2007,8 +2015,10 @@ static int blkif_recover(struct blkfront_info *info)
 	struct split_bio *split_bio;
 
 	blkfront_gather_backend_features(info);
+	/* Reset limits changed by blk_mq_update_nr_hw_queues(). */
+	blkif_set_queue_limits(info);
 	segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
-	blk_queue_max_segments(info->rq, segs);
+	blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);
 
 	for (r_index = 0; r_index < info->nr_rings; r_index++) {
 		struct blkfront_ring_info *rinfo = &info->rinfo[r_index];
@@ -2432,7 +2442,7 @@ static void blkfront_connect(struct blkfront_info *info)
 	if (err) {
 		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
 				 info->xbdev->otherend);
-		return;
+		goto fail;
 	}
 
 	xenbus_switch_state(info->xbdev, XenbusStateConnected);
@@ -2445,6 +2455,11 @@ static void blkfront_connect(struct blkfront_info *info)
 	device_add_disk(&info->xbdev->dev, info->gd);
 
 	info->is_ready = 1;
+	return;
+
+fail:
+	blkif_free(info, 0);
+	return;
 }
 
 /**
@@ -66,10 +66,10 @@ static void kona_timer_disable_and_clear(void __iomem *base)
 
 }
 
-static void
+static int
 kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw)
 {
-	int loop_limit = 4;
+	int loop_limit = 3;
 
 	/*
 	 * Read 64-bit free running counter
@@ -83,18 +83,19 @@ kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw)
 	 *   if new hi-word is equal to previously read hi-word then stop.
 	 */
 
-	while (--loop_limit) {
+	do {
 		*msw = readl(timer_base + KONA_GPTIMER_STCHI_OFFSET);
 		*lsw = readl(timer_base + KONA_GPTIMER_STCLO_OFFSET);
 		if (*msw == readl(timer_base + KONA_GPTIMER_STCHI_OFFSET))
 			break;
-	}
+	} while (--loop_limit);
 	if (!loop_limit) {
 		pr_err("bcm_kona_timer: getting counter failed.\n");
 		pr_err(" Timer will be impacted\n");
+		return -ETIMEDOUT;
 	}
 
-	return;
+	return 0;
 }
 
 static int kona_timer_set_next_event(unsigned long clc,
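
The counter read works by sampling the high word, then the low word, then re-checking the high word; if the high word changed, the low word may have wrapped between the reads and the pair must be re-sampled. A runnable sketch of the bounded hi-lo-hi loop, with fake registers simulating one rollover:

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the bounded hi-lo-hi read in kona_timer_get_counter() above. */
    static uint32_t fake_hi, fake_lo;   /* stand-ins for the mmio registers */

    static uint32_t read_hi(void) { return fake_hi; }
    static uint32_t read_lo(void)
    {
            /* Simulate low-word rollover incrementing the high word. */
            if (++fake_lo == 0)
                    fake_hi++;
            return fake_lo;
    }

    static int get_counter(uint32_t *msw, uint32_t *lsw)
    {
            int loop_limit = 3;

            do {
                    *msw = read_hi();
                    *lsw = read_lo();
                    if (*msw == read_hi())
                            return 0;   /* high word stable: consistent pair */
            } while (--loop_limit);

            return -1;                  /* stands in for -ETIMEDOUT */
    }

    int main(void)
    {
            uint32_t hi, lo;

            fake_hi = 0;
            fake_lo = 0xffffffff;       /* force one rollover */
            if (!get_counter(&hi, &lo))
                    printf("counter = %llu\n",
                           ((unsigned long long)hi << 32) | lo);
            return 0;
    }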
@@ -112,8 +113,11 @@ static int kona_timer_set_next_event(unsigned long clc,
 
 	uint32_t lsw, msw;
 	uint32_t reg;
+	int ret;
 
-	kona_timer_get_counter(timers.tmr_regs, &msw, &lsw);
+	ret = kona_timer_get_counter(timers.tmr_regs, &msw, &lsw);
+	if (ret)
+		return ret;
 
 	/* Load the "next" event tick value */
 	writel(lsw + clc, timers.tmr_regs + KONA_GPTIMER_STCM0_OFFSET);
@@ -164,7 +164,7 @@ void __init gic_clocksource_init(unsigned int frequency)
 	gic_start_count();
 }
 
-static void __init gic_clocksource_of_init(struct device_node *node)
+static int __init gic_clocksource_of_init(struct device_node *node)
 {
 	struct clk *clk;
 	int ret;
@@ -21,6 +21,8 @@
 #include <linux/of_irq.h>
 #include <linux/sched_clock.h>
 
+#include <clocksource/pxa.h>
+
 #include <asm/div64.h>
 
 #define OSMR0		0x00	/* OS Timer 0 Match Register */
@@ -123,12 +123,16 @@ static struct clock_event_device sun4i_clockevent = {
 	.set_next_event = sun4i_clkevt_next_event,
 };
 
+static void sun4i_timer_clear_interrupt(void)
+{
+	writel(TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_ST_REG);
+}
+
 static irqreturn_t sun4i_timer_interrupt(int irq, void *dev_id)
 {
 	struct clock_event_device *evt = (struct clock_event_device *)dev_id;
 
-	writel(0x1, timer_base + TIMER_IRQ_ST_REG);
+	sun4i_timer_clear_interrupt();
 	evt->event_handler(evt);
 
 	return IRQ_HANDLED;
@@ -208,6 +212,9 @@ static int __init sun4i_timer_init(struct device_node *node)
 	/* Make sure timer is stopped before playing with interrupts */
 	sun4i_clkevt_time_stop(0);
 
+	/* clear timer0 interrupt */
+	sun4i_timer_clear_interrupt();
+
 	sun4i_clockevent.cpumask = cpu_possible_mask;
 	sun4i_clockevent.irq = irq;
 
@@ -338,7 +338,6 @@ static int __init armada_xp_timer_init(struct device_node *np)
 	struct clk *clk = of_clk_get_by_name(np, "fixed");
 	int ret;
 
-	clk = of_clk_get(np, 0);
 	if (IS_ERR(clk)) {
 		pr_err("Failed to get clock");
 		return PTR_ERR(clk);
Some files were not shown because too many files have changed in this diff.