Merge 5.0-rc4 into usb-next

We need the USB fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman 2019-01-28 08:44:58 +01:00
commit c9381e185f
723 changed files with 7762 additions and 3867 deletions


@ -72,6 +72,10 @@ ForEachMacros:
- 'apei_estatus_for_each_section'
- 'ata_for_each_dev'
- 'ata_for_each_link'
- '__ata_qc_for_each'
- 'ata_qc_for_each'
- 'ata_qc_for_each_raw'
- 'ata_qc_for_each_with_internal'
- 'ax25_for_each'
- 'ax25_uid_for_each'
- 'bio_for_each_integrity_vec'
@ -85,6 +89,7 @@ ForEachMacros:
- 'blk_queue_for_each_rl'
- 'bond_for_each_slave'
- 'bond_for_each_slave_rcu'
- 'bpf_for_each_spilled_reg'
- 'btree_for_each_safe128'
- 'btree_for_each_safe32'
- 'btree_for_each_safe64'
@ -103,6 +108,8 @@ ForEachMacros:
- 'drm_atomic_crtc_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane_state'
- 'drm_atomic_for_each_plane_damage'
- 'drm_connector_for_each_possible_encoder'
- 'drm_for_each_connector_iter'
- 'drm_for_each_crtc'
- 'drm_for_each_encoder'
@ -121,11 +128,21 @@ ForEachMacros:
- 'for_each_bio'
- 'for_each_board_func_rsrc'
- 'for_each_bvec'
- 'for_each_card_components'
- 'for_each_card_links'
- 'for_each_card_links_safe'
- 'for_each_card_prelinks'
- 'for_each_card_rtds'
- 'for_each_card_rtds_safe'
- 'for_each_cgroup_storage_type'
- 'for_each_child_of_node'
- 'for_each_clear_bit'
- 'for_each_clear_bit_from'
- 'for_each_cmsghdr'
- 'for_each_compatible_node'
- 'for_each_component_dais'
- 'for_each_component_dais_safe'
- 'for_each_comp_order'
- 'for_each_console'
- 'for_each_cpu'
- 'for_each_cpu_and'
@ -133,6 +150,10 @@ ForEachMacros:
- 'for_each_cpu_wrap'
- 'for_each_dev_addr'
- 'for_each_dma_cap_mask'
- 'for_each_dpcm_be'
- 'for_each_dpcm_be_rollback'
- 'for_each_dpcm_be_safe'
- 'for_each_dpcm_fe'
- 'for_each_drhd_unit'
- 'for_each_dss_dev'
- 'for_each_efi_memory_desc'
@ -149,6 +170,7 @@ ForEachMacros:
- 'for_each_iommu'
- 'for_each_ip_tunnel_rcu'
- 'for_each_irq_nr'
- 'for_each_link_codecs'
- 'for_each_lru'
- 'for_each_matching_node'
- 'for_each_matching_node_and_match'
@ -160,6 +182,7 @@ ForEachMacros:
- 'for_each_mem_range_rev'
- 'for_each_migratetype_order'
- 'for_each_msi_entry'
- 'for_each_msi_entry_safe'
- 'for_each_net'
- 'for_each_netdev'
- 'for_each_netdev_continue'
@ -183,12 +206,14 @@ ForEachMacros:
- 'for_each_node_with_property'
- 'for_each_of_allnodes'
- 'for_each_of_allnodes_from'
- 'for_each_of_cpu_node'
- 'for_each_of_pci_range'
- 'for_each_old_connector_in_state'
- 'for_each_old_crtc_in_state'
- 'for_each_oldnew_connector_in_state'
- 'for_each_oldnew_crtc_in_state'
- 'for_each_oldnew_plane_in_state'
- 'for_each_oldnew_plane_in_state_reverse'
- 'for_each_oldnew_private_obj_in_state'
- 'for_each_old_plane_in_state'
- 'for_each_old_private_obj_in_state'
@ -206,14 +231,17 @@ ForEachMacros:
- 'for_each_process'
- 'for_each_process_thread'
- 'for_each_property_of_node'
- 'for_each_registered_fb'
- 'for_each_reserved_mem_region'
- 'for_each_resv_unavail_range'
- 'for_each_rtd_codec_dai'
- 'for_each_rtd_codec_dai_rollback'
- 'for_each_rtdcom'
- 'for_each_rtdcom_safe'
- 'for_each_set_bit'
- 'for_each_set_bit_from'
- 'for_each_sg'
- 'for_each_sg_page'
- 'for_each_sibling_event'
- '__for_each_thread'
- 'for_each_thread'
- 'for_each_zone'
@ -251,6 +279,8 @@ ForEachMacros:
- 'hlist_nulls_for_each_entry_from'
- 'hlist_nulls_for_each_entry_rcu'
- 'hlist_nulls_for_each_entry_safe'
- 'i3c_bus_for_each_i2cdev'
- 'i3c_bus_for_each_i3cdev'
- 'ide_host_for_each_port'
- 'ide_port_for_each_dev'
- 'ide_port_for_each_present_dev'
@ -267,11 +297,14 @@ ForEachMacros:
- 'kvm_for_each_memslot'
- 'kvm_for_each_vcpu'
- 'list_for_each'
- 'list_for_each_codec'
- 'list_for_each_codec_safe'
- 'list_for_each_entry'
- 'list_for_each_entry_continue'
- 'list_for_each_entry_continue_rcu'
- 'list_for_each_entry_continue_reverse'
- 'list_for_each_entry_from'
- 'list_for_each_entry_from_rcu'
- 'list_for_each_entry_from_reverse'
- 'list_for_each_entry_lockless'
- 'list_for_each_entry_rcu'
@ -291,6 +324,7 @@ ForEachMacros:
- 'media_device_for_each_intf'
- 'media_device_for_each_link'
- 'media_device_for_each_pad'
- 'nanddev_io_for_each_page'
- 'netdev_for_each_lower_dev'
- 'netdev_for_each_lower_private'
- 'netdev_for_each_lower_private_rcu'
@ -357,12 +391,14 @@ ForEachMacros:
- 'sk_nulls_for_each'
- 'sk_nulls_for_each_from'
- 'sk_nulls_for_each_rcu'
- 'snd_array_for_each'
- 'snd_pcm_group_for_each_entry'
- 'snd_soc_dapm_widget_for_each_path'
- 'snd_soc_dapm_widget_for_each_path_safe'
- 'snd_soc_dapm_widget_for_each_sink_path'
- 'snd_soc_dapm_widget_for_each_source_path'
- 'tb_property_for_each'
- 'tcf_exts_for_each_action'
- 'udp_portaddr_for_each_entry'
- 'udp_portaddr_for_each_entry_rcu'
- 'usb_hub_for_each_child'
@ -371,6 +407,11 @@ ForEachMacros:
- 'v4l2_m2m_for_each_dst_buf_safe'
- 'v4l2_m2m_for_each_src_buf'
- 'v4l2_m2m_for_each_src_buf_safe'
- 'virtio_device_for_each_vq'
- 'xa_for_each'
- 'xas_for_each'
- 'xas_for_each_conflict'
- 'xas_for_each_marked'
- 'zorro_for_each_dev'
#IncludeBlocks: Preserve # Unknown to clang-format-5.0


@ -157,12 +157,11 @@ Q: Does BPF have a stable ABI?
------------------------------
A: YES. BPF instructions, arguments to BPF programs, set of helper
functions and their arguments, recognized return codes are all part
of ABI. However when tracing programs are using bpf_probe_read() helper
to walk kernel internal datastructures and compile with kernel
internal headers these accesses can and will break with newer
kernels. The union bpf_attr -> kern_version is checked at load time
to prevent accidentally loading kprobe-based bpf programs written
for a different kernel. Networking programs don't do kern_version check.
of ABI. However there is one specific exception to tracing programs
which are using helpers like bpf_probe_read() to walk kernel internal
data structures and compile with kernel internal headers. Both of these
kernel internals are subject to change and can break with newer kernels
such that the program needs to be adapted accordingly.
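
To make the tracing exception above concrete, here is a minimal illustrative
sketch (not part of this document or of the patch; it assumes libbpf-style
headers, the BPF_KPROBE convenience macro, and a purely hypothetical structure
offset) of a kprobe program that walks a kernel-internal structure with
bpf_probe_read() and can therefore silently break on a newer kernel::

  /* Sketch only: the 0x5c0 offset is hypothetical and version specific. */
  #include <linux/ptrace.h>
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>

  char LICENSE[] SEC("license") = "GPL";

  SEC("kprobe/do_exit")
  int BPF_KPROBE(probe_do_exit)
  {
          struct task_struct *task = (void *)bpf_get_current_task();
          int exit_code = 0;

          /* Reading at a hard-coded offset inside task_struct; the layout
           * comes from one kernel's internal headers and is not ABI. */
          bpf_probe_read(&exit_code, sizeof(exit_code),
                         (void *)task + 0x5c0 /* hypothetical offset */);
          bpf_printk("task exiting, code=%d", exit_code);
          return 0;
  }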
Q: How much stack space a BPF program uses?
-------------------------------------------


@ -108,12 +108,13 @@ some, but not all of the other indices changing.
Sometimes you need to ensure that a subsequent call to :c:func:`xa_store`
will not need to allocate memory. The :c:func:`xa_reserve` function
will store a reserved entry at the indicated index. Users of the
normal API will see this entry as containing ``NULL``. If you do
not need to use the reserved entry, you can call :c:func:`xa_release`
to remove the unused entry. If another user has stored to the entry
in the meantime, :c:func:`xa_release` will do nothing; if instead you
want the entry to become ``NULL``, you should use :c:func:`xa_erase`.
Using :c:func:`xa_insert` on a reserved entry will fail.

If all entries in the array are ``NULL``, the :c:func:`xa_empty` function
will return ``true``.
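
As an illustration of the reserve/release pattern described above, a minimal
sketch (not taken from this document; the array name and index are arbitrary
examples) might look like::

  #include <linux/xarray.h>

  static DEFINE_XARRAY(example_array);

  static int stash(void *payload)
  {
          int err;

          /* Guarantee that a later store at index 5 needs no allocation. */
          err = xa_reserve(&example_array, 5, GFP_KERNEL);
          if (err)
                  return err;

          if (!payload) {
                  /* Slot not needed after all; readers only ever saw NULL. */
                  xa_release(&example_array, 5);
                  return 0;
          }

          /* Fill the reserved slot. */
          xa_store(&example_array, 5, payload, GFP_KERNEL);
          return 0;
  }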
@ -183,6 +184,8 @@ Takes xa_lock internally:
* :c:func:`xa_store_bh`
* :c:func:`xa_store_irq`
* :c:func:`xa_insert`
* :c:func:`xa_insert_bh`
* :c:func:`xa_insert_irq`
* :c:func:`xa_erase`
* :c:func:`xa_erase_bh`
* :c:func:`xa_erase_irq`


@ -235,4 +235,4 @@ cpus {
===========================================
[1] ARM Linux Kernel documentation - CPUs bindings
Documentation/devicetree/bindings/arm/cpus.txt
Documentation/devicetree/bindings/arm/cpus.yaml


@ -684,7 +684,7 @@ cpus {
===========================================
[1] ARM Linux Kernel documentation - CPUs bindings
Documentation/devicetree/bindings/arm/cpus.txt
Documentation/devicetree/bindings/arm/cpus.yaml
[2] ARM Linux Kernel documentation - PSCI bindings
Documentation/devicetree/bindings/arm/psci.txt


@ -4,7 +4,7 @@ SP810 System Controller
Required properties:
- compatible: standard compatible string for a Primecell peripheral,
see Documentation/devicetree/bindings/arm/primecell.txt
see Documentation/devicetree/bindings/arm/primecell.yaml
for more details
should be: "arm,sp810", "arm,primecell"


@ -472,4 +472,4 @@ cpus {
===============================================================================
[1] ARM Linux kernel documentation
Documentation/devicetree/bindings/arm/cpus.txt
Documentation/devicetree/bindings/arm/cpus.yaml


@ -18,4 +18,4 @@ Required Properties:
Each clock is assigned an identifier and client nodes use this identifier
to specify the clock which they consume.
All these identifier could be found in <dt-bindings/clock/marvell-mmp2.h>.
All these identifiers could be found in <dt-bindings/clock/marvell,mmp2.h>.


@ -1,6 +1,6 @@
* ARM PrimeCell Color LCD Controller PL110/PL111
See also Documentation/devicetree/bindings/arm/primecell.txt
See also Documentation/devicetree/bindings/arm/primecell.yaml
Required properties:


@ -27,7 +27,6 @@ Example:
reg = <0x04300000 0x20000>;
reg-names = "kgsl_3d0_reg_memory";
interrupts = <GIC_SPI 80 0>;
interrupt-names = "kgsl_3d0_irq";
clock-names =
"core",
"iface",


@ -14,8 +14,6 @@ Required properties:
"marvell,armada-8k-gpio" should be used for the Armada 7K and 8K "marvell,armada-8k-gpio" should be used for the Armada 7K and 8K
SoCs (either from AP or CP), see SoCs (either from AP or CP), see
Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt
and
Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt
for specific details about the offset property. for specific details about the offset property.


@ -78,7 +78,7 @@ Sub-nodes:
PPI affinity can be expressed as a single "ppi-partitions" node,
containing a set of sub-nodes, each with the following property:
- affinity: Should be a list of phandles to CPU nodes (as described in
Documentation/devicetree/bindings/arm/cpus.txt).
Documentation/devicetree/bindings/arm/cpus.yaml).
GICv3 has one or more Interrupt Translation Services (ITS) that are
used to route Message Signalled Interrupts (MSI) to the CPUs.


@ -55,7 +55,7 @@ of these nodes are defined by the individual bindings for the specific function
= EXAMPLE
The following example represents the GLINK RPM node on a MSM8996 device, with
the function for the "rpm_request" channel defined, which is used for
regualtors and root clocks.
regulators and root clocks.
apcs_glb: mailbox@9820000 {
compatible = "qcom,msm8996-apcs-hmss-global";


@ -41,12 +41,12 @@ processor ID) and a string identifier.
- qcom,local-pid:
Usage: required
Value type: <u32>
Definition: specifies the identfier of the local endpoint of this edge
Definition: specifies the identifier of the local endpoint of this edge
- qcom,remote-pid:
Usage: required
Value type: <u32>
Definition: specifies the identfier of the remote endpoint of this edge
Definition: specifies the identifier of the remote endpoint of this edge
= SUBNODES
Each SMP2P pair contain a set of inbound and outbound entries, these are


@ -163,6 +163,14 @@ C. Boot options
be preserved until there actually is some text is output to the console.
This option causes fbcon to bind immediately to the fbdev device.
7. fbcon=logo-pos:<location>
The only possible 'location' is 'center' (without quotes), and when
given, the bootup logo is moved from the default top-left corner
location to the center of the framebuffer. If more than one logo is
displayed due to multiple CPUs, the collected line of logos is moved
as a whole.
C. Attaching, Detaching and Unloading
Before going on to how to attach, detach and unload the framebuffer console, an


@ -11,19 +11,19 @@ Contents:
batman-adv
can
can_ucan_protocol
dpaa2/index
device_drivers/freescale/dpaa2/index
e100
device_drivers/intel/e100
e1000
device_drivers/intel/e1000
e1000e
device_drivers/intel/e1000e
fm10k
device_drivers/intel/fm10k
igb
device_drivers/intel/igb
igbvf
device_drivers/intel/igbvf
ixgb
device_drivers/intel/ixgb
ixgbe
device_drivers/intel/ixgbe
ixgbevf
device_drivers/intel/ixgbevf
i40e
device_drivers/intel/i40e
iavf
device_drivers/intel/iavf
ice
device_drivers/intel/ice
kapi
z8530book
msg_zerocopy


@ -1000,51 +1000,6 @@ The kernel interface functions are as follows:
size should be set when the call is begun. tx_total_len may not be less
than zero.
(*) Check to see the completion state of a call so that the caller can assess
whether it needs to be retried.
enum rxrpc_call_completion {
RXRPC_CALL_SUCCEEDED,
RXRPC_CALL_REMOTELY_ABORTED,
RXRPC_CALL_LOCALLY_ABORTED,
RXRPC_CALL_LOCAL_ERROR,
RXRPC_CALL_NETWORK_ERROR,
};
int rxrpc_kernel_check_call(struct socket *sock, struct rxrpc_call *call,
enum rxrpc_call_completion *_compl,
u32 *_abort_code);
On return, -EINPROGRESS will be returned if the call is still ongoing; if
it is finished, *_compl will be set to indicate the manner of completion,
*_abort_code will be set to any abort code that occurred. 0 will be
returned on a successful completion, -ECONNABORTED will be returned if the
client failed due to a remote abort and anything else will return an
appropriate error code.
The caller should look at this information to decide if it's worth
retrying the call.
(*) Retry a client call.
int rxrpc_kernel_retry_call(struct socket *sock,
struct rxrpc_call *call,
struct sockaddr_rxrpc *srx,
struct key *key);
This attempts to partially reinitialise a call and submit it again while
reusing the original call's Tx queue to avoid the need to repackage and
re-encrypt the data to be sent. call indicates the call to retry, srx the
new address to send it to and key the encryption key to use for signing or
encrypting the packets.
For this to work, the first Tx data packet must still be in the transmit
queue, and currently this is only permitted for local and network errors
and the call must not have been aborted. Any partially constructed Tx
packet is left as is and can continue being filled afterwards.
It returns 0 if the call was requeued and an error otherwise.
(*) Get call RTT.
u64 rxrpc_kernel_get_rtt(struct socket *sock, struct rxrpc_call *call);


@ -336,7 +336,26 @@ time client replies ACK, this socket will get another chance to move
to the accept queue.
TCP Fast Open
* TcpEstabResets
Defined in `RFC1213 tcpEstabResets`_.
.. _RFC1213 tcpEstabResets: https://tools.ietf.org/html/rfc1213#page-48
* TcpAttemptFails
Defined in `RFC1213 tcpAttemptFails`_.
.. _RFC1213 tcpAttemptFails: https://tools.ietf.org/html/rfc1213#page-48
* TcpOutRsts
Defined in `RFC1213 tcpOutRsts`_. The RFC says this counter indicates
the 'segments sent containing the RST flag', but in linux kernel, this
couner indicates the segments kerenl tried to send. The sending
process might be failed due to some errors (e.g. memory alloc failed).
.. _RFC1213 tcpOutRsts: https://tools.ietf.org/html/rfc1213#page-52
TCP Fast Path
============
When kernel receives a TCP packet, it has two paths to handler the
packet, one is fast path, another is slow path. The comment in kernel
@ -383,8 +402,6 @@ increase 1.
TCP abort
========
* TcpExtTCPAbortOnData
It means TCP layer has data in flight, but need to close the
connection. So TCP layer sends a RST to the other side, indicate the
@ -545,7 +562,6 @@ packet yet, the sender would know packet 4 is out of order. The TCP
stack of kernel will increase TcpExtTCPSACKReorder for both of the
above scenarios.
DSACK
=====
The DSACK is defined in `RFC2883`_. The receiver uses DSACK to report
@ -566,13 +582,63 @@ The TCP stack receives an out of order duplicate packet, so it sends a
DSACK to the sender.
* TcpExtTCPDSACKRecv
The TCP stack receives a DSACK, which indicate an acknowledged
The TCP stack receives a DSACK, which indicates an acknowledged
duplicate packet is received.
* TcpExtTCPDSACKOfoRecv
The TCP stack receives a DSACK, which indicate an out of order
duplicate packet is received.
invalid SACK and DSACK
====================
When a SACK (or DSACK) block is invalid, a corresponding counter would
be updated. The validation method is base on the start/end sequence
number of the SACK block. For more details, please refer the comment
of the function tcp_is_sackblock_valid in the kernel source code. A
SACK option could have up to 4 blocks, they are checked
individually. E.g., if 3 blocks of a SACk is invalid, the
corresponding counter would be updated 3 times. The comment of the
`Add counters for discarded SACK blocks`_ patch has additional
explaination:
.. _Add counters for discarded SACK blocks: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=18f02545a9a16c9a89778b91a162ad16d510bb32
* TcpExtTCPSACKDiscard
This counter indicates how many SACK blocks are invalid. If the invalid
SACK block is caused by ACK recording, the TCP stack will only ignore
it and won't update this counter.
* TcpExtTCPDSACKIgnoredOld and TcpExtTCPDSACKIgnoredNoUndo
When a DSACK block is invalid, one of these two counters would be
updated. Which counter will be updated depends on the undo_marker flag
of the TCP socket. If the undo_marker is not set, the TCP stack isn't
likely to re-transmit any packets, and we still receive an invalid
DSACK block, the reason might be that the packet is duplicated in the
middle of the network. In such scenario, TcpExtTCPDSACKIgnoredNoUndo
will be updated. If the undo_marker is set, TcpExtTCPDSACKIgnoredOld
will be updated. As implied in its name, it might be an old packet.
SACK shift
=========
The linux networking stack stores data in sk_buff struct (skb for
short). If a SACK block acrosses multiple skb, the TCP stack will try
to re-arrange data in these skb. E.g. if a SACK block acknowledges seq
10 to 15, skb1 has seq 10 to 13, skb2 has seq 14 to 20. The seq 14 and
15 in skb2 would be moved to skb1. This operation is 'shift'. If a
SACK block acknowledges seq 10 to 20, skb1 has seq 10 to 13, skb2 has
seq 14 to 20. All data in skb2 will be moved to skb1, and skb2 will be
discard, this operation is 'merge'.
* TcpExtTCPSackShifted
A skb is shifted
* TcpExtTCPSackMerged
A skb is merged
* TcpExtTCPSackShiftFallback
A skb should be shifted or merged, but the TCP stack doesn't do it for
some reasons.
TCP out of order TCP out of order
===============
* TcpExtTCPOFOQueue
@ -662,6 +728,60 @@ unacknowledged number (more strict than `RFC 5961 section 5.2`_).
.. _RFC 5961 section 4.2: https://tools.ietf.org/html/rfc5961#page-9
.. _RFC 5961 section 5.2: https://tools.ietf.org/html/rfc5961#page-11
TCP receive window
=================
* TcpExtTCPWantZeroWindowAdv
Depending on current memory usage, the TCP stack tries to set receive
window to zero. But the receive window might still be a no-zero
value. For example, if the previous window size is 10, and the TCP
stack receives 3 bytes, the current window size would be 7 even if the
window size calculated by the memory usage is zero.
* TcpExtTCPToZeroWindowAdv
The TCP receive window is set to zero from a no-zero value.
* TcpExtTCPFromZeroWindowAdv
The TCP receive window is set to no-zero value from zero.
Delayed ACK
==========
The TCP Delayed ACK is a technique which is used for reducing the
packet count in the network. For more details, please refer the
`Delayed ACK wiki`_
.. _Delayed ACK wiki: https://en.wikipedia.org/wiki/TCP_delayed_acknowledgment
* TcpExtDelayedACKs
A delayed ACK timer expires. The TCP stack will send a pure ACK packet
and exit the delayed ACK mode.
* TcpExtDelayedACKLocked
A delayed ACK timer expires, but the TCP stack can't send an ACK
immediately due to the socket is locked by a userspace program. The
TCP stack will send a pure ACK later (after the userspace program
unlock the socket). When the TCP stack sends the pure ACK later, the
TCP stack will also update TcpExtDelayedACKs and exit the delayed ACK
mode.
* TcpExtDelayedACKLost
It will be updated when the TCP stack receives a packet which has been
ACKed. A Delayed ACK loss might cause this issue, but it would also be
triggered by other reasons, such as a packet is duplicated in the
network.
Tail Loss Probe (TLP)
===================
TLP is an algorithm which is used to detect TCP packet loss. For more
details, please refer the `TLP paper`_.
.. _TLP paper: https://tools.ietf.org/html/draft-dukkipati-tcpm-tcp-loss-probe-01
* TcpExtTCPLossProbes
A TLP probe packet is sent.
* TcpExtTCPLossProbeRecovery
A packet loss is detected and recovered by TLP.
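
For readers who want to watch the TcpExt counters described above change, a
small user-space sketch (not part of this document; plain C, minimal error
handling) that dumps the name/value pairs from /proc/net/netstat could look
like this::

  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
          char names[4096], values[4096];
          FILE *f = fopen("/proc/net/netstat", "r");

          if (!f)
                  return 1;

          /* The file holds pairs of lines: a header line with counter
           * names and a line with the matching values. */
          while (fgets(names, sizeof(names), f) &&
                 fgets(values, sizeof(values), f)) {
                  char *np, *vp;
                  char *n = strtok_r(names, " \n", &np);
                  char *v = strtok_r(values, " \n", &vp);

                  while (n && v) {
                          printf("%-30s %s\n", n, v);
                          n = strtok_r(NULL, " \n", &np);
                          v = strtok_r(NULL, " \n", &vp);
                  }
          }
          fclose(f);
          return 0;
  }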
examples
=======


@ -417,7 +417,7 @@ is again deprecated and ts[2] holds a hardware timestamp if set.
Hardware time stamping must also be initialized for each device driver
that is expected to do hardware time stamping. The parameter is defined in
/include/linux/net_tstamp.h as:
include/uapi/linux/net_tstamp.h as:
struct hwtstamp_config {
int flags; /* no flags defined right now, must be zero */
@ -487,7 +487,7 @@ enum {
HWTSTAMP_FILTER_PTP_V1_L4_EVENT,
/* for the complete list of values, please check
* the include file /include/linux/net_tstamp.h
* the include file include/uapi/linux/net_tstamp.h
*/
};
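
A minimal user-space sketch of driving this interface (not taken from this
document; the interface name "eth0" and the chosen filter are only examples)
could look like this:

  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <sys/socket.h>
  #include <net/if.h>
  #include <linux/net_tstamp.h>
  #include <linux/sockios.h>

  int main(void)
  {
          struct hwtstamp_config cfg = {
                  .flags = 0,
                  .tx_type = HWTSTAMP_TX_ON,
                  .rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT,
          };
          struct ifreq ifr;
          int fd = socket(AF_INET, SOCK_DGRAM, 0);

          if (fd < 0)
                  return 1;

          memset(&ifr, 0, sizeof(ifr));
          strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);   /* example interface */
          ifr.ifr_data = (void *)&cfg;

          /* Ask the driver to enable hardware time stamping. */
          if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
                  perror("SIOCSHWTSTAMP");

          close(fd);
          return 0;
  }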


@ -3052,8 +3052,8 @@ F: include/linux/bcm963xx_nvram.h
F: include/linux/bcm963xx_tag.h
BROADCOM BNX2 GIGABIT ETHERNET DRIVER
M: Rasesh Mody <rasesh.mody@cavium.com>
M: Rasesh Mody <rmody@marvell.com>
M: Dept-GELinuxNICDev@cavium.com
M: GR-Linux-NIC-Dev@marvell.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/bnx2.*
@ -3072,9 +3072,9 @@ S: Supported
F: drivers/scsi/bnx2i/
BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
M: Ariel Elior <ariel.elior@cavium.com>
M: Ariel Elior <aelior@marvell.com>
M: Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
M: Sudarsana Kalluru <skalluru@marvell.com>
M: everest-linux-l2@cavium.com
M: GR-everest-linux-l2@marvell.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/bnx2x/
@ -3249,9 +3249,9 @@ S: Supported
F: drivers/scsi/bfa/
BROCADE BNA 10 GIGABIT ETHERNET DRIVER
M: Rasesh Mody <rasesh.mody@cavium.com>
M: Rasesh Mody <rmody@marvell.com>
M: Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
M: Sudarsana Kalluru <skalluru@marvell.com>
M: Dept-GELinuxNICDev@cavium.com
M: GR-Linux-NIC-Dev@marvell.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/brocade/bna/
@ -3471,10 +3471,9 @@ F: drivers/i2c/busses/i2c-octeon*
F: drivers/i2c/busses/i2c-thunderx*
CAVIUM LIQUIDIO NETWORK DRIVER
M: Derek Chickles <derek.chickles@caviumnetworks.com>
M: Derek Chickles <dchickles@marvell.com>
M: Satanand Burla <satananda.burla@caviumnetworks.com>
M: Satanand Burla <sburla@marvell.com>
M: Felix Manlunas <felix.manlunas@caviumnetworks.com>
M: Felix Manlunas <fmanlunas@marvell.com>
M: Raghu Vatsavayi <raghu.vatsavayi@caviumnetworks.com>
L: netdev@vger.kernel.org
W: http://www.cavium.com
S: Supported
@ -3979,6 +3978,7 @@ F: drivers/cpufreq/arm_big_little.c
CPU POWER MONITORING SUBSYSTEM
M: Thomas Renninger <trenn@suse.com>
M: Shuah Khan <shuah@kernel.org>
M: Shuah Khan <skhan@linuxfoundation.org>
L: linux-pm@vger.kernel.org
S: Maintained
F: tools/power/cpupower/
@ -8259,6 +8259,7 @@ F: include/uapi/linux/sunrpc/
KERNEL SELFTEST FRAMEWORK
M: Shuah Khan <shuah@kernel.org>
M: Shuah Khan <skhan@linuxfoundation.org>
L: linux-kselftest@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest.git
Q: https://patchwork.kernel.org/project/linux-kselftest/list/
@ -10689,9 +10690,9 @@ S: Maintained
F: drivers/net/netdevsim/*
NETXEN (1/10) GbE SUPPORT
M: Manish Chopra <manish.chopra@cavium.com>
M: Manish Chopra <manishc@marvell.com>
M: Rahul Verma <rahul.verma@cavium.com>
M: Rahul Verma <rahulv@marvell.com>
M: Dept-GELinuxNICDev@cavium.com
M: GR-Linux-NIC-Dev@marvell.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/qlogic/netxen/
@ -12475,8 +12476,8 @@ S: Supported
F: drivers/scsi/qedi/
QLOGIC QL4xxx ETHERNET DRIVER
M: Ariel Elior <Ariel.Elior@cavium.com>
M: Ariel Elior <aelior@marvell.com>
M: everest-linux-l2@cavium.com
M: GR-everest-linux-l2@marvell.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/qlogic/qed/
@ -12484,8 +12485,8 @@ F: include/linux/qed/
F: drivers/net/ethernet/qlogic/qede/
QLOGIC QL4xxx RDMA DRIVER
M: Michal Kalderon <Michal.Kalderon@cavium.com>
M: Michal Kalderon <mkalderon@marvell.com>
M: Ariel Elior <Ariel.Elior@cavium.com>
M: Ariel Elior <aelior@marvell.com>
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/hw/qedr/
@ -12505,7 +12506,7 @@ F: Documentation/scsi/LICENSE.qla2xxx
F: drivers/scsi/qla2xxx/
QLOGIC QLA3XXX NETWORK DRIVER
M: Dept-GELinuxNICDev@cavium.com
M: GR-Linux-NIC-Dev@marvell.com
L: netdev@vger.kernel.org
S: Supported
F: Documentation/networking/device_drivers/qlogic/LICENSE.qla3xxx
@ -12519,16 +12520,16 @@ F: Documentation/scsi/LICENSE.qla4xxx
F: drivers/scsi/qla4xxx/
QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
M: Shahed Shaikh <Shahed.Shaikh@cavium.com>
M: Shahed Shaikh <shshaikh@marvell.com>
M: Manish Chopra <manish.chopra@cavium.com>
M: Manish Chopra <manishc@marvell.com>
M: Dept-GELinuxNICDev@cavium.com
M: GR-Linux-NIC-Dev@marvell.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/qlogic/qlcnic/
QLOGIC QLGE 10Gb ETHERNET DRIVER
M: Manish Chopra <manish.chopra@cavium.com>
M: Manish Chopra <manishc@marvell.com>
M: Dept-GELinuxNICDev@cavium.com
M: GR-Linux-NIC-Dev@marvell.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/qlogic/qlge/
@ -15842,6 +15843,7 @@ F: drivers/usb/common/usb-otg-fsm.c
USB OVER IP DRIVER
M: Valentina Manea <valentina.manea.m@gmail.com>
M: Shuah Khan <shuah@kernel.org>
M: Shuah Khan <skhan@linuxfoundation.org>
L: linux-usb@vger.kernel.org
S: Maintained
F: Documentation/usb/usbip_protocol.txt


@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 0
SUBLEVEL = 0
EXTRAVERSION = -rc2
EXTRAVERSION = -rc4
NAME = Shy Crocodile
# *DOCUMENTATION*
@ -955,6 +955,7 @@ ifdef CONFIG_STACK_VALIDATION
endif
endif
PHONY += prepare0
ifeq ($(KBUILD_EXTMOD),)
core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/
@ -1061,8 +1062,7 @@ scripts: scripts_basic scripts_dtc
# archprepare is used in arch Makefiles and when processed asm symlink,
# version.h and scripts_basic is processed / created.
# Listed in dependency order
PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3
PHONY += prepare archprepare prepare1 prepare2 prepare3
# prepare3 is used to check if we are building in a separate output directory,
# and if so do:
@ -1360,11 +1360,11 @@ mrproper: rm-dirs := $(wildcard $(MRPROPER_DIRS))
mrproper: rm-files := $(wildcard $(MRPROPER_FILES))
mrproper-dirs := $(addprefix _mrproper_,scripts)
PHONY += $(mrproper-dirs) mrproper archmrproper
PHONY += $(mrproper-dirs) mrproper
$(mrproper-dirs):
$(Q)$(MAKE) $(clean)=$(patsubst _mrproper_%,%,$@)
mrproper: clean archmrproper $(mrproper-dirs)
mrproper: clean $(mrproper-dirs)
$(call cmd,rmdirs)
$(call cmd,rmfiles)


@ -3,23 +3,19 @@ generic-y += bugs.h
generic-y += compat.h
generic-y += device.h
generic-y += div64.h
generic-y += dma-mapping.h
generic-y += emergency-restart.h
generic-y += extable.h
generic-y += fb.h
generic-y += ftrace.h
generic-y += hardirq.h
generic-y += hw_irq.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kmap_types.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += msi.h
generic-y += parport.h
generic-y += pci.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += topology.h


@ -216,6 +216,14 @@ struct bcr_fp_arcv2 {
#endif
};
struct bcr_actionpoint {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad:21, min:1, num:2, ver:8;
#else
unsigned int ver:8, num:2, min:1, pad:21;
#endif
};
#include <soc/arc/timers.h>
struct bcr_bpu_arcompact {
@ -283,7 +291,7 @@ struct cpuinfo_arc_cache {
};
struct cpuinfo_arc_bpu {
unsigned int ver, full, num_cache, num_pred;
unsigned int ver, full, num_cache, num_pred, ret_stk;
};
struct cpuinfo_arc_ccm {
@ -302,7 +310,7 @@ struct cpuinfo_arc {
struct {
unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2,
fpu_sp:1, fpu_dp:1, dual:1, dual_enb:1, pad2:4,
debug:1, ap:1, smart:1, rtt:1, pad3:4,
ap_num:4, ap_full:1, smart:1, rtt:1, pad3:1,
timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
} extn;
struct bcr_mpy extn_mpy;


@ -340,7 +340,7 @@ static inline __attribute__ ((const)) int __fls(unsigned long x)
/*
* __ffs: Similar to ffs, but zero based (0-31)
*/
static inline __attribute__ ((const)) int __ffs(unsigned long word)
static inline __attribute__ ((const)) unsigned long __ffs(unsigned long word)
{
if (!word)
return word;
@ -400,9 +400,9 @@ static inline __attribute__ ((const)) int ffs(unsigned long x)
/*
* __ffs: Similar to ffs, but zero based (0-31)
*/
static inline __attribute__ ((const)) int __ffs(unsigned long x)
static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x)
{
int n;
unsigned long n;
asm volatile(
" ffs.f %0, %1 \n" /* 0:31; 31(Z) if src 0 */


@ -103,7 +103,8 @@ static const char * const arc_pmu_ev_hw_map[] = {
/* counts condition */
[PERF_COUNT_HW_INSTRUCTIONS] = "iall",
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */
/* All jump instructions that are taken */
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
[PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */
#ifdef CONFIG_ISA_ARCV2
[PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",


@ -1,15 +1,10 @@
/*
* Linux performance counter support for ARC700 series
*
* Copyright (C) 2013-2015 Synopsys, Inc. (www.synopsys.com)
*
* This code is inspired by the perf support of various other architectures.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
// SPDX-License-Identifier: GPL-2.0+
//
// Linux performance counter support for ARC CPUs.
// This code is inspired by the perf support of various other architectures.
//
// Copyright (C) 2013-2018 Synopsys, Inc. (www.synopsys.com)
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/module.h>
@ -19,12 +14,31 @@
#include <asm/arcregs.h>
#include <asm/stacktrace.h>
/* HW holds 8 symbols + one for null terminator */
#define ARCPMU_EVENT_NAME_LEN 9
enum arc_pmu_attr_groups {
ARCPMU_ATTR_GR_EVENTS,
ARCPMU_ATTR_GR_FORMATS,
ARCPMU_NR_ATTR_GR
};
struct arc_pmu_raw_event_entry {
char name[ARCPMU_EVENT_NAME_LEN];
};
struct arc_pmu {
struct pmu pmu;
unsigned int irq;
int n_counters;
int n_events;
u64 max_period;
int ev_hw_idx[PERF_COUNT_ARC_HW_MAX];
struct arc_pmu_raw_event_entry *raw_entry;
struct attribute **attrs;
struct perf_pmu_events_attr *attr;
const struct attribute_group *attr_groups[ARCPMU_NR_ATTR_GR + 1];
};
struct arc_pmu_cpu {
@ -49,6 +63,7 @@ static int callchain_trace(unsigned int addr, void *data)
{
struct arc_callchain_trace *ctrl = data;
struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff;
perf_callchain_store(entry, addr);
if (ctrl->depth++ < 3)
@ -57,8 +72,8 @@ static int callchain_trace(unsigned int addr, void *data)
return -1;
}
void
perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
struct arc_callchain_trace ctrl = {
.depth = 0,
@ -68,8 +83,8 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
arc_unwind_core(NULL, regs, callchain_trace, &ctrl);
}
void
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
/*
* User stack can't be unwound trivially with kernel dwarf unwinder
@ -82,10 +97,10 @@ static struct arc_pmu *arc_pmu;
static DEFINE_PER_CPU(struct arc_pmu_cpu, arc_pmu_cpu);
/* read counter #idx; note that counter# != event# on ARC! */
static uint64_t arc_pmu_read_counter(int idx)
static u64 arc_pmu_read_counter(int idx)
{
uint32_t tmp;
u32 tmp;
uint64_t result;
u64 result;
/*
* ARC supports making 'snapshots' of the counters, so we don't
@ -94,7 +109,7 @@ static uint64_t arc_pmu_read_counter(int idx)
write_aux_reg(ARC_REG_PCT_INDEX, idx);
tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
write_aux_reg(ARC_REG_PCT_CONTROL, tmp | ARC_REG_PCT_CONTROL_SN);
result = (uint64_t) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32;
result = (u64) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32;
result |= read_aux_reg(ARC_REG_PCT_SNAPL);
return result;
@ -103,9 +118,9 @@ static uint64_t arc_pmu_read_counter(int idx)
static void arc_perf_event_update(struct perf_event *event,
struct hw_perf_event *hwc, int idx)
{
uint64_t prev_raw_count = local64_read(&hwc->prev_count);
u64 prev_raw_count = local64_read(&hwc->prev_count);
uint64_t new_raw_count = arc_pmu_read_counter(idx);
u64 new_raw_count = arc_pmu_read_counter(idx);
int64_t delta = new_raw_count - prev_raw_count;
s64 delta = new_raw_count - prev_raw_count;
/*
* We aren't afraid of hwc->prev_count changing beneath our feet
@ -155,7 +170,7 @@ static int arc_pmu_event_init(struct perf_event *event)
int ret;
if (!is_sampling_event(event)) {
hwc->sample_period = arc_pmu->max_period;
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
}
@ -192,6 +207,18 @@ static int arc_pmu_event_init(struct perf_event *event)
pr_debug("init cache event with h/w %08x \'%s\'\n", pr_debug("init cache event with h/w %08x \'%s\'\n",
(int)hwc->config, arc_pmu_ev_hw_map[ret]); (int)hwc->config, arc_pmu_ev_hw_map[ret]);
return 0; return 0;
case PERF_TYPE_RAW:
if (event->attr.config >= arc_pmu->n_events)
return -ENOENT;
hwc->config |= event->attr.config;
pr_debug("init raw event with idx %lld \'%s\'\n",
event->attr.config,
arc_pmu->raw_entry[event->attr.config].name);
return 0;
default:
return -ENOENT;
}
@ -200,7 +227,7 @@ static int arc_pmu_event_init(struct perf_event *event)
/* starts all counters */
static void arc_pmu_enable(struct pmu *pmu)
{
uint32_t tmp;
u32 tmp;
tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x1);
}
@ -208,7 +235,7 @@ static void arc_pmu_enable(struct pmu *pmu)
/* stops all counters */
static void arc_pmu_disable(struct pmu *pmu)
{
uint32_t tmp;
u32 tmp;
tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x0);
}
@ -228,7 +255,7 @@ static int arc_pmu_event_set_period(struct perf_event *event)
local64_set(&hwc->period_left, left);
hwc->last_period = period;
overflow = 1;
} else if (unlikely(left <= 0)) {
/* left underflowed by less than period. */
left += period;
local64_set(&hwc->period_left, left);
@ -246,8 +273,8 @@ static int arc_pmu_event_set_period(struct perf_event *event)
write_aux_reg(ARC_REG_PCT_INDEX, idx);
/* Write value */
write_aux_reg(ARC_REG_PCT_COUNTL, (u32)value);
write_aux_reg(ARC_REG_PCT_COUNTL, lower_32_bits(value));
write_aux_reg(ARC_REG_PCT_COUNTH, (value >> 32));
write_aux_reg(ARC_REG_PCT_COUNTH, upper_32_bits(value));
perf_event_update_userpage(event);
@ -277,7 +304,7 @@ static void arc_pmu_start(struct perf_event *event, int flags)
/* Enable interrupt for this counter */
if (is_sampling_event(event))
write_aux_reg(ARC_REG_PCT_INT_CTRL,
read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx));
read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));
/* enable ARC pmu here */
write_aux_reg(ARC_REG_PCT_INDEX, idx); /* counter # */
@ -295,9 +322,9 @@ static void arc_pmu_stop(struct perf_event *event, int flags)
* Reset interrupt flag by writing of 1. This is required
* to make sure pending interrupt was not left.
*/
write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx);
write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx));
write_aux_reg(ARC_REG_PCT_INT_CTRL,
read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~(1 << idx));
read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~BIT(idx));
}
if (!(event->hw.state & PERF_HES_STOPPED)) {
@ -349,9 +376,10 @@ static int arc_pmu_add(struct perf_event *event, int flags)
if (is_sampling_event(event)) {
/* Mimic full counter overflow as other arches do */
write_aux_reg(ARC_REG_PCT_INT_CNTL, (u32)arc_pmu->max_period);
write_aux_reg(ARC_REG_PCT_INT_CNTL,
lower_32_bits(arc_pmu->max_period));
write_aux_reg(ARC_REG_PCT_INT_CNTH,
(arc_pmu->max_period >> 32));
upper_32_bits(arc_pmu->max_period));
}
write_aux_reg(ARC_REG_PCT_CONFIG, 0);
@ -392,7 +420,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
idx = __ffs(active_ints);
/* Reset interrupt flag by writing of 1 */
write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx);
write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx));
/*
* On reset of "interrupt active" bit corresponding
@ -400,7 +428,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
* Now we need to re-enable interrupt for the counter.
*/
write_aux_reg(ARC_REG_PCT_INT_CTRL,
read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx));
read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));
event = pmu_cpu->act_counter[idx];
hwc = &event->hw;
@ -414,7 +442,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
arc_pmu_stop(event, 0);
}
active_ints &= ~(1U << idx);
active_ints &= ~BIT(idx);
} while (active_ints);
done:
@ -441,19 +469,108 @@ static void arc_cpu_pmu_irq_init(void *data)
write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
}
/* Event field occupies the bottom 15 bits of our config field */
PMU_FORMAT_ATTR(event, "config:0-14");
static struct attribute *arc_pmu_format_attrs[] = {
&format_attr_event.attr,
NULL,
};
static struct attribute_group arc_pmu_format_attr_gr = {
.name = "format",
.attrs = arc_pmu_format_attrs,
};
static ssize_t arc_pmu_events_sysfs_show(struct device *dev,
struct device_attribute *attr,
char *page)
{
struct perf_pmu_events_attr *pmu_attr;
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
}
/*
* We don't add attrs here as we don't have pre-defined list of perf events.
* We will generate and add attrs dynamically in probe() after we read HW
* configuration.
*/
static struct attribute_group arc_pmu_events_attr_gr = {
.name = "events",
};
static void arc_pmu_add_raw_event_attr(int j, char *str)
{
memmove(arc_pmu->raw_entry[j].name, str, ARCPMU_EVENT_NAME_LEN - 1);
arc_pmu->attr[j].attr.attr.name = arc_pmu->raw_entry[j].name;
arc_pmu->attr[j].attr.attr.mode = VERIFY_OCTAL_PERMISSIONS(0444);
arc_pmu->attr[j].attr.show = arc_pmu_events_sysfs_show;
arc_pmu->attr[j].id = j;
arc_pmu->attrs[j] = &(arc_pmu->attr[j].attr.attr);
}
static int arc_pmu_raw_alloc(struct device *dev)
{
arc_pmu->attr = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
sizeof(*arc_pmu->attr), GFP_KERNEL | __GFP_ZERO);
if (!arc_pmu->attr)
return -ENOMEM;
arc_pmu->attrs = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
sizeof(*arc_pmu->attrs), GFP_KERNEL | __GFP_ZERO);
if (!arc_pmu->attrs)
return -ENOMEM;
arc_pmu->raw_entry = devm_kmalloc_array(dev, arc_pmu->n_events,
sizeof(*arc_pmu->raw_entry), GFP_KERNEL | __GFP_ZERO);
if (!arc_pmu->raw_entry)
return -ENOMEM;
return 0;
}
static inline bool event_in_hw_event_map(int i, char *name)
{
if (!arc_pmu_ev_hw_map[i])
return false;
if (!strlen(arc_pmu_ev_hw_map[i]))
return false;
if (strcmp(arc_pmu_ev_hw_map[i], name))
return false;
return true;
}
static void arc_pmu_map_hw_event(int j, char *str)
{
int i;
/* See if HW condition has been mapped to a perf event_id */
for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
if (event_in_hw_event_map(i, str)) {
pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n",
i, str, j);
arc_pmu->ev_hw_idx[i] = j;
}
}
}
static int arc_pmu_device_probe(struct platform_device *pdev)
{
struct arc_reg_pct_build pct_bcr;
struct arc_reg_cc_build cc_bcr;
int i, j, has_interrupts;
int i, has_interrupts;
int counter_size; /* in bits */
union cc_name {
struct {
uint32_t word0, word1;
u32 word0, word1;
char sentinel;
} indiv;
char str[9];
char str[ARCPMU_EVENT_NAME_LEN];
} cc_name;
@ -463,15 +580,22 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
return -ENODEV;
}
BUILD_BUG_ON(ARC_PERF_MAX_COUNTERS > 32);
BUG_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS);
if (WARN_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS))
return -EINVAL;
READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
BUG_ON(!cc_bcr.v); /* Counters exist but No countable conditions ? */
if (WARN(!cc_bcr.v, "Counters exist but No countable conditions?"))
return -EINVAL;
arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), GFP_KERNEL);
if (!arc_pmu)
return -ENOMEM;
arc_pmu->n_events = cc_bcr.c;
if (arc_pmu_raw_alloc(&pdev->dev))
return -ENOMEM;
has_interrupts = is_isa_arcv2() ? pct_bcr.i : 0;
arc_pmu->n_counters = pct_bcr.c;
@ -481,30 +605,26 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
pr_info("ARC perf\t: %d counters (%d bits), %d conditions%s\n", pr_info("ARC perf\t: %d counters (%d bits), %d conditions%s\n",
arc_pmu->n_counters, counter_size, cc_bcr.c, arc_pmu->n_counters, counter_size, cc_bcr.c,
has_interrupts ? ", [overflow IRQ support]":""); has_interrupts ? ", [overflow IRQ support]" : "");
cc_name.str[8] = 0; cc_name.str[ARCPMU_EVENT_NAME_LEN - 1] = 0;
for (i = 0; i < PERF_COUNT_ARC_HW_MAX; i++) for (i = 0; i < PERF_COUNT_ARC_HW_MAX; i++)
arc_pmu->ev_hw_idx[i] = -1; arc_pmu->ev_hw_idx[i] = -1;
/* loop thru all available h/w condition indexes */ /* loop thru all available h/w condition indexes */
for (j = 0; j < cc_bcr.c; j++) { for (i = 0; i < cc_bcr.c; i++) {
write_aux_reg(ARC_REG_CC_INDEX, j); write_aux_reg(ARC_REG_CC_INDEX, i);
cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0); cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0);
cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1); cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1);
/* See if it has been mapped to a perf event_id */ arc_pmu_map_hw_event(i, cc_name.str);
for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) { arc_pmu_add_raw_event_attr(i, cc_name.str);
if (arc_pmu_ev_hw_map[i] &&
!strcmp(arc_pmu_ev_hw_map[i], cc_name.str) &&
strlen(arc_pmu_ev_hw_map[i])) {
pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n",
i, cc_name.str, j);
arc_pmu->ev_hw_idx[i] = j;
}
}
} }
arc_pmu_events_attr_gr.attrs = arc_pmu->attrs;
arc_pmu->attr_groups[ARCPMU_ATTR_GR_EVENTS] = &arc_pmu_events_attr_gr;
arc_pmu->attr_groups[ARCPMU_ATTR_GR_FORMATS] = &arc_pmu_format_attr_gr;
arc_pmu->pmu = (struct pmu) {
.pmu_enable = arc_pmu_enable,
.pmu_disable = arc_pmu_disable,
@ -514,6 +634,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
.start = arc_pmu_start, .start = arc_pmu_start,
.stop = arc_pmu_stop, .stop = arc_pmu_stop,
.read = arc_pmu_read, .read = arc_pmu_read,
.attr_groups = arc_pmu->attr_groups,
}; };
if (has_interrupts) { if (has_interrupts) {
@ -535,17 +656,19 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
} else } else
arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
return perf_pmu_register(&arc_pmu->pmu, pdev->name, PERF_TYPE_RAW); /*
* perf parser doesn't really like '-' symbol in events name, so let's
* use '_' in arc pct name as it goes to kernel PMU event prefix.
*/
return perf_pmu_register(&arc_pmu->pmu, "arc_pct", PERF_TYPE_RAW);
} }
#ifdef CONFIG_OF
static const struct of_device_id arc_pmu_match[] = { static const struct of_device_id arc_pmu_match[] = {
{ .compatible = "snps,arc700-pct" }, { .compatible = "snps,arc700-pct" },
{ .compatible = "snps,archs-pct" }, { .compatible = "snps,archs-pct" },
{}, {},
}; };
MODULE_DEVICE_TABLE(of, arc_pmu_match); MODULE_DEVICE_TABLE(of, arc_pmu_match);
#endif
static struct platform_driver arc_pmu_driver = { static struct platform_driver arc_pmu_driver = {
.driver = { .driver = {
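For context (not part of this patch): once the PMU above is registered as "arc_pct", its type id is exported through sysfs and a raw countable condition can be opened with perf_event_open(2). The following is a minimal user-space sketch only; the sysfs path and the config value 0 are assumptions about how such a PMU is typically exposed, not something taken from this diff.

/* Hedged sketch: count one raw PMU event via perf_event_open(2).
 * Assumes a PMU named "arc_pct" exposing its type id in sysfs;
 * the raw config value (0 here) is an arbitrary example index.
 */
#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	FILE *f = fopen("/sys/bus/event_source/devices/arc_pct/type", "r");
	int type = PERF_TYPE_RAW, fd;
	long long count;

	if (f) {			/* fall back to PERF_TYPE_RAW if absent */
		if (fscanf(f, "%d", &type) != 1)
			type = PERF_TYPE_RAW;
		fclose(f);
	}

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = 0;		/* raw h/w condition index (example) */

	/* measure the calling process on any CPU */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	read(fd, &count, sizeof(count));
	printf("count: %lld\n", count);
	close(fd);
	return 0;
}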


@ -123,6 +123,7 @@ static void read_arc_build_cfg_regs(void)
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
const struct id_to_str *tbl; const struct id_to_str *tbl;
struct bcr_isa_arcv2 isa; struct bcr_isa_arcv2 isa;
struct bcr_actionpoint ap;
FIX_PTR(cpu); FIX_PTR(cpu);
@ -195,6 +196,7 @@ static void read_arc_build_cfg_regs(void)
cpu->bpu.full = bpu.ft; cpu->bpu.full = bpu.ft;
cpu->bpu.num_cache = 256 << bpu.bce; cpu->bpu.num_cache = 256 << bpu.bce;
cpu->bpu.num_pred = 2048 << bpu.pte; cpu->bpu.num_pred = 2048 << bpu.pte;
cpu->bpu.ret_stk = 4 << bpu.rse;
if (cpu->core.family >= 0x54) { if (cpu->core.family >= 0x54) {
unsigned int exec_ctrl; unsigned int exec_ctrl;
@ -207,8 +209,11 @@ static void read_arc_build_cfg_regs(void)
} }
} }
READ_BCR(ARC_REG_AP_BCR, bcr); READ_BCR(ARC_REG_AP_BCR, ap);
cpu->extn.ap = bcr.ver ? 1 : 0; if (ap.ver) {
cpu->extn.ap_num = 2 << ap.num;
cpu->extn.ap_full = !!ap.min;
}
READ_BCR(ARC_REG_SMART_BCR, bcr); READ_BCR(ARC_REG_SMART_BCR, bcr);
cpu->extn.smart = bcr.ver ? 1 : 0; cpu->extn.smart = bcr.ver ? 1 : 0;
@ -216,8 +221,6 @@ static void read_arc_build_cfg_regs(void)
READ_BCR(ARC_REG_RTT_BCR, bcr); READ_BCR(ARC_REG_RTT_BCR, bcr);
cpu->extn.rtt = bcr.ver ? 1 : 0; cpu->extn.rtt = bcr.ver ? 1 : 0;
cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt;
READ_BCR(ARC_REG_ISA_CFG_BCR, isa); READ_BCR(ARC_REG_ISA_CFG_BCR, isa);
/* some hacks for lack of feature BCR info in old ARC700 cores */ /* some hacks for lack of feature BCR info in old ARC700 cores */
@ -299,10 +302,10 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
if (cpu->bpu.ver) if (cpu->bpu.ver)
n += scnprintf(buf + n, len - n, n += scnprintf(buf + n, len - n,
"BPU\t\t: %s%s match, cache:%d, Predict Table:%d", "BPU\t\t: %s%s match, cache:%d, Predict Table:%d Return stk: %d",
IS_AVAIL1(cpu->bpu.full, "full"), IS_AVAIL1(cpu->bpu.full, "full"),
IS_AVAIL1(!cpu->bpu.full, "partial"), IS_AVAIL1(!cpu->bpu.full, "partial"),
cpu->bpu.num_cache, cpu->bpu.num_pred); cpu->bpu.num_cache, cpu->bpu.num_pred, cpu->bpu.ret_stk);
if (is_isa_arcv2()) { if (is_isa_arcv2()) {
struct bcr_lpb lpb; struct bcr_lpb lpb;
@ -336,11 +339,17 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
IS_AVAIL1(cpu->extn.fpu_sp, "SP "), IS_AVAIL1(cpu->extn.fpu_sp, "SP "),
IS_AVAIL1(cpu->extn.fpu_dp, "DP ")); IS_AVAIL1(cpu->extn.fpu_dp, "DP "));
if (cpu->extn.debug) if (cpu->extn.ap_num | cpu->extn.smart | cpu->extn.rtt) {
n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s%s\n", n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s",
IS_AVAIL1(cpu->extn.ap, "ActionPoint "),
IS_AVAIL1(cpu->extn.smart, "smaRT "), IS_AVAIL1(cpu->extn.smart, "smaRT "),
IS_AVAIL1(cpu->extn.rtt, "RTT ")); IS_AVAIL1(cpu->extn.rtt, "RTT "));
if (cpu->extn.ap_num) {
n += scnprintf(buf + n, len - n, "ActionPoint %d/%s",
cpu->extn.ap_num,
cpu->extn.ap_full ? "full":"min");
}
n += scnprintf(buf + n, len - n, "\n");
}
if (cpu->dccm.sz || cpu->iccm.sz) if (cpu->dccm.sz || cpu->iccm.sz)
n += scnprintf(buf + n, len - n, "Extn [CCM]\t: DCCM @ %x, %d KB / ICCM: @ %x, %d KB\n", n += scnprintf(buf + n, len - n, "Extn [CCM]\t: DCCM @ %x, %d KB / ICCM: @ %x, %d KB\n",


@ -18,6 +18,8 @@
#include <asm/arcregs.h> #include <asm/arcregs.h>
#include <asm/irqflags.h> #include <asm/irqflags.h>
#define ARC_PATH_MAX 256
/* /*
* Common routine to print scratch regs (r0-r12) or callee regs (r13-r25) * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25)
* -Prints 3 regs per line and a CR. * -Prints 3 regs per line and a CR.
@ -58,11 +60,12 @@ static void show_callee_regs(struct callee_regs *cregs)
print_reg_file(&(cregs->r13), 13); print_reg_file(&(cregs->r13), 13);
} }
static void print_task_path_n_nm(struct task_struct *tsk, char *buf) static void print_task_path_n_nm(struct task_struct *tsk)
{ {
char *path_nm = NULL; char *path_nm = NULL;
struct mm_struct *mm; struct mm_struct *mm;
struct file *exe_file; struct file *exe_file;
char buf[ARC_PATH_MAX];
mm = get_task_mm(tsk); mm = get_task_mm(tsk);
if (!mm) if (!mm)
@ -72,7 +75,7 @@ static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
mmput(mm); mmput(mm);
if (exe_file) { if (exe_file) {
path_nm = file_path(exe_file, buf, 255); path_nm = file_path(exe_file, buf, ARC_PATH_MAX-1);
fput(exe_file); fput(exe_file);
} }
@ -80,10 +83,9 @@ done:
pr_info("Path: %s\n", !IS_ERR(path_nm) ? path_nm : "?"); pr_info("Path: %s\n", !IS_ERR(path_nm) ? path_nm : "?");
} }
static void show_faulting_vma(unsigned long address, char *buf) static void show_faulting_vma(unsigned long address)
{ {
struct vm_area_struct *vma; struct vm_area_struct *vma;
char *nm = buf;
struct mm_struct *active_mm = current->active_mm; struct mm_struct *active_mm = current->active_mm;
/* can't use print_vma_addr() yet as it doesn't check for /* can't use print_vma_addr() yet as it doesn't check for
@ -96,8 +98,11 @@ static void show_faulting_vma(unsigned long address, char *buf)
* if the container VMA is not found * if the container VMA is not found
*/ */
if (vma && (vma->vm_start <= address)) { if (vma && (vma->vm_start <= address)) {
char buf[ARC_PATH_MAX];
char *nm = "?";
if (vma->vm_file) { if (vma->vm_file) {
nm = file_path(vma->vm_file, buf, PAGE_SIZE - 1); nm = file_path(vma->vm_file, buf, ARC_PATH_MAX-1);
if (IS_ERR(nm)) if (IS_ERR(nm))
nm = "?"; nm = "?";
} }
@ -173,13 +178,14 @@ void show_regs(struct pt_regs *regs)
{ {
struct task_struct *tsk = current; struct task_struct *tsk = current;
struct callee_regs *cregs; struct callee_regs *cregs;
char *buf;
buf = (char *)__get_free_page(GFP_KERNEL); /*
if (!buf) * generic code calls us with preemption disabled, but some calls
return; * here could sleep, so re-enable to avoid lockdep splat
*/
preempt_enable();
print_task_path_n_nm(tsk, buf); print_task_path_n_nm(tsk);
show_regs_print_info(KERN_INFO); show_regs_print_info(KERN_INFO);
show_ecr_verbose(regs); show_ecr_verbose(regs);
@ -189,7 +195,7 @@ void show_regs(struct pt_regs *regs)
(void *)regs->blink, (void *)regs->ret); (void *)regs->blink, (void *)regs->ret);
if (user_mode(regs)) if (user_mode(regs))
show_faulting_vma(regs->ret, buf); /* faulting code, not data */ show_faulting_vma(regs->ret); /* faulting code, not data */
pr_info("[STAT32]: 0x%08lx", regs->status32); pr_info("[STAT32]: 0x%08lx", regs->status32);
@ -222,7 +228,7 @@ void show_regs(struct pt_regs *regs)
if (cregs) if (cregs)
show_callee_regs(cregs); show_callee_regs(cregs);
free_page((unsigned long)buf); preempt_disable();
} }
void show_kernel_fault_diag(const char *str, struct pt_regs *regs, void show_kernel_fault_diag(const char *str, struct pt_regs *regs,


@ -7,11 +7,39 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/cache.h>
#undef PREALLOC_NOT_AVAIL /*
* The memset implementation below is optimized to use prefetchw and prealloc
* instruction in case of CPU with 64B L1 data cache line (L1_CACHE_SHIFT == 6)
* If you want to implement optimized memset for other possible L1 data cache
 * line lengths (32B and 128B) you should rewrite the code, carefully checking
 * that we don't issue a prefetchw/prealloc instruction for L1 cache lines
 * that don't belong to the memset area.
*/
#if L1_CACHE_SHIFT == 6
.macro PREALLOC_INSTR reg, off
prealloc [\reg, \off]
.endm
.macro PREFETCHW_INSTR reg, off
prefetchw [\reg, \off]
.endm
#else
.macro PREALLOC_INSTR
.endm
.macro PREFETCHW_INSTR
.endm
#endif
ENTRY_CFI(memset) ENTRY_CFI(memset)
prefetchw [r0] ; Prefetch the write location PREFETCHW_INSTR r0, 0 ; Prefetch the first write location
mov.f 0, r2 mov.f 0, r2
;;; if size is zero ;;; if size is zero
jz.d [blink] jz.d [blink]
@ -48,11 +76,8 @@ ENTRY_CFI(memset)
lpnz @.Lset64bytes lpnz @.Lset64bytes
;; LOOP START ;; LOOP START
#ifdef PREALLOC_NOT_AVAIL PREALLOC_INSTR r3, 64 ; alloc next line w/o fetching
prefetchw [r3, 64] ;Prefetch the next write location
#else
prealloc [r3, 64]
#endif
#ifdef CONFIG_ARC_HAS_LL64 #ifdef CONFIG_ARC_HAS_LL64
std.ab r4, [r3, 8] std.ab r4, [r3, 8]
std.ab r4, [r3, 8] std.ab r4, [r3, 8]
@ -85,7 +110,6 @@ ENTRY_CFI(memset)
lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes
lpnz .Lset32bytes lpnz .Lset32bytes
;; LOOP START ;; LOOP START
prefetchw [r3, 32] ;Prefetch the next write location
#ifdef CONFIG_ARC_HAS_LL64 #ifdef CONFIG_ARC_HAS_LL64
std.ab r4, [r3, 8] std.ab r4, [r3, 8]
std.ab r4, [r3, 8] std.ab r4, [r3, 8]


@ -141,12 +141,17 @@ good_area:
*/ */
fault = handle_mm_fault(vma, address, flags); fault = handle_mm_fault(vma, address, flags);
/* If Pagefault was interrupted by SIGKILL, exit page fault "early" */
if (fatal_signal_pending(current)) { if (fatal_signal_pending(current)) {
if ((fault & VM_FAULT_ERROR) && !(fault & VM_FAULT_RETRY))
up_read(&mm->mmap_sem); /*
if (user_mode(regs)) * if fault retry, mmap_sem already relinquished by core mm
* so OK to return to user mode (with signal handled first)
*/
if (fault & VM_FAULT_RETRY) {
if (!user_mode(regs))
goto no_context;
return; return;
}
} }
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);


@ -119,7 +119,8 @@ void __init setup_arch_memory(void)
*/ */
memblock_add_node(low_mem_start, low_mem_sz, 0); memblock_add_node(low_mem_start, low_mem_sz, 0);
memblock_reserve(low_mem_start, __pa(_end) - low_mem_start); memblock_reserve(CONFIG_LINUX_LINK_BASE,
__pa(_end) - CONFIG_LINUX_LINK_BASE);
#ifdef CONFIG_BLK_DEV_INITRD #ifdef CONFIG_BLK_DEV_INITRD
if (phys_initrd_size) { if (phys_initrd_size) {


@ -1 +1,95 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
#define _ASM_ARM_XEN_PAGE_COHERENT_H
#include <linux/dma-mapping.h>
#include <asm/page.h>
#include <xen/arm/page-coherent.h> #include <xen/arm/page-coherent.h>
static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev)
{
if (dev && dev->archdata.dev_dma_ops)
return dev->archdata.dev_dma_ops;
return get_arch_dma_ops(NULL);
}
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
{
return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
}
static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
}
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
dma_addr_t dev_addr, unsigned long offset, size_t size,
enum dma_data_direction dir, unsigned long attrs)
{
unsigned long page_pfn = page_to_xen_pfn(page);
unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
unsigned long compound_pages =
(1<<compound_order(page)) * XEN_PFN_PER_PAGE;
bool local = (page_pfn <= dev_pfn) &&
(dev_pfn - page_pfn < compound_pages);
/*
* Dom0 is mapped 1:1, while the Linux page can span across
* multiple Xen pages, it's not possible for it to contain a
* mix of local and foreign Xen pages. So if the first xen_pfn
* == mfn the page is local otherwise it's a foreign page
* grant-mapped in dom0. If the page is local we can safely
* call the native dma_ops function, otherwise we call the xen
* specific function.
*/
if (local)
xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
else
__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
}
static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
unsigned long pfn = PFN_DOWN(handle);
/*
* Dom0 is mapped 1:1, while the Linux page can span across
* multiple Xen pages, it's not possible to have a mix of local and
* foreign Xen pages. Dom0 is mapped 1:1, so calling pfn_valid on a
* foreign mfn will always return false. If the page is local we can
* safely call the native dma_ops function, otherwise we call the xen
* specific function.
*/
if (pfn_valid(pfn)) {
if (xen_get_dma_ops(hwdev)->unmap_page)
xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
} else
__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
}
static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
unsigned long pfn = PFN_DOWN(handle);
if (pfn_valid(pfn)) {
if (xen_get_dma_ops(hwdev)->sync_single_for_cpu)
xen_get_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
} else
__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
}
static inline void xen_dma_sync_single_for_device(struct device *hwdev,
dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
unsigned long pfn = PFN_DOWN(handle);
if (pfn_valid(pfn)) {
if (xen_get_dma_ops(hwdev)->sync_single_for_device)
xen_get_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
} else
__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
}
#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */


@ -60,8 +60,6 @@
#ifdef CONFIG_KASAN_SW_TAGS #ifdef CONFIG_KASAN_SW_TAGS
#define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT) #define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT)
#else
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif #endif
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__


@ -20,9 +20,6 @@ struct dev_archdata {
#ifdef CONFIG_IOMMU_API #ifdef CONFIG_IOMMU_API
void *iommu; /* private IOMMU data */ void *iommu; /* private IOMMU data */
#endif #endif
#ifdef CONFIG_XEN
const struct dma_map_ops *dev_dma_ops;
#endif
}; };
struct pdev_archdata { struct pdev_archdata {


@ -60,8 +60,11 @@ static inline bool arm64_kernel_use_ng_mappings(void)
* later determine that kpti is required, then * later determine that kpti is required, then
* kpti_install_ng_mappings() will make them non-global. * kpti_install_ng_mappings() will make them non-global.
*/ */
if (arm64_kernel_unmapped_at_el0())
return true;
if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE)) if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
return arm64_kernel_unmapped_at_el0(); return false;
/* /*
* KASLR is enabled so we're going to be enabling kpti on non-broken * KASLR is enabled so we're going to be enabling kpti on non-broken


@ -1 +1,77 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARM64_XEN_PAGE_COHERENT_H
#define _ASM_ARM64_XEN_PAGE_COHERENT_H
#include <linux/dma-mapping.h>
#include <asm/page.h>
#include <xen/arm/page-coherent.h> #include <xen/arm/page-coherent.h>
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
{
return dma_direct_alloc(hwdev, size, dma_handle, flags, attrs);
}
static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
dma_direct_free(hwdev, size, cpu_addr, dma_handle, attrs);
}
static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
unsigned long pfn = PFN_DOWN(handle);
if (pfn_valid(pfn))
dma_direct_sync_single_for_cpu(hwdev, handle, size, dir);
else
__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
}
static inline void xen_dma_sync_single_for_device(struct device *hwdev,
dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
unsigned long pfn = PFN_DOWN(handle);
if (pfn_valid(pfn))
dma_direct_sync_single_for_device(hwdev, handle, size, dir);
else
__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
}
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
dma_addr_t dev_addr, unsigned long offset, size_t size,
enum dma_data_direction dir, unsigned long attrs)
{
unsigned long page_pfn = page_to_xen_pfn(page);
unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
unsigned long compound_pages =
(1<<compound_order(page)) * XEN_PFN_PER_PAGE;
bool local = (page_pfn <= dev_pfn) &&
(dev_pfn - page_pfn < compound_pages);
if (local)
dma_direct_map_page(hwdev, page, offset, size, dir, attrs);
else
__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
}
static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
unsigned long pfn = PFN_DOWN(handle);
/*
* Dom0 is mapped 1:1, while the Linux page can span across
* multiple Xen pages, it's not possible to have a mix of local and
* foreign Xen pages. Dom0 is mapped 1:1, so calling pfn_valid on a
* foreign mfn will always return false. If the page is local we can
* safely call the native dma_ops function, otherwise we call the xen
* specific function.
*/
if (pfn_valid(pfn))
dma_direct_unmap_page(hwdev, handle, size, dir, attrs);
else
__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
}
#endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */


@ -14,6 +14,7 @@
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/types.h> #include <linux/types.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h> #include <asm/fixmap.h>
#include <asm/kernel-pgtable.h> #include <asm/kernel-pgtable.h>
#include <asm/memory.h> #include <asm/memory.h>
@ -43,7 +44,7 @@ static __init u64 get_kaslr_seed(void *fdt)
return ret; return ret;
} }
static __init const u8 *get_cmdline(void *fdt) static __init const u8 *kaslr_get_cmdline(void *fdt)
{ {
static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE; static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE;
@ -109,7 +110,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
* Check if 'nokaslr' appears on the command line, and * Check if 'nokaslr' appears on the command line, and
* return 0 if that is the case. * return 0 if that is the case.
*/ */
cmdline = get_cmdline(fdt); cmdline = kaslr_get_cmdline(fdt);
str = strstr(cmdline, "nokaslr"); str = strstr(cmdline, "nokaslr");
if (str == cmdline || (str > cmdline && *(str - 1) == ' ')) if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
return 0; return 0;
@ -169,5 +170,8 @@ u64 __init kaslr_early_init(u64 dt_phys)
module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21; module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
module_alloc_base &= PAGE_MASK; module_alloc_base &= PAGE_MASK;
__flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
__flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed));
return offset; return offset;
} }


@ -466,9 +466,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
__iommu_setup_dma_ops(dev, dma_base, size, iommu); __iommu_setup_dma_ops(dev, dma_base, size, iommu);
#ifdef CONFIG_XEN #ifdef CONFIG_XEN
if (xen_initial_domain()) { if (xen_initial_domain())
dev->archdata.dev_dma_ops = dev->dma_ops;
dev->dma_ops = xen_dma_ops; dev->dma_ops = xen_dma_ops;
}
#endif #endif
} }


@ -37,8 +37,6 @@ libs-y += arch/$(ARCH)/lib/
boot := arch/h8300/boot boot := arch/h8300/boot
archmrproper:
archclean: archclean:
$(Q)$(MAKE) $(clean)=$(boot) $(Q)$(MAKE) $(clean)=$(boot)


@ -16,8 +16,6 @@ KBUILD_DEFCONFIG := generic_defconfig
NM := $(CROSS_COMPILE)nm -B NM := $(CROSS_COMPILE)nm -B
READELF := $(CROSS_COMPILE)readelf READELF := $(CROSS_COMPILE)readelf
export AWK
CHECKFLAGS += -D__ia64=1 -D__ia64__=1 -D_LP64 -D__LP64__ CHECKFLAGS += -D__ia64=1 -D__ia64__=1 -D_LP64 -D__LP64__
OBJCOPYFLAGS := --strip-all OBJCOPYFLAGS := --strip-all


@ -3155,6 +3155,7 @@ config MIPS32_O32
config MIPS32_N32 config MIPS32_N32
bool "Kernel support for n32 binaries" bool "Kernel support for n32 binaries"
depends on 64BIT depends on 64BIT
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
select COMPAT select COMPAT
select MIPS32_COMPAT select MIPS32_COMPAT
select SYSVIPC_COMPAT if SYSVIPC select SYSVIPC_COMPAT if SYSVIPC


@ -173,6 +173,31 @@ void __init plat_mem_setup(void)
pm_power_off = bcm47xx_machine_halt; pm_power_off = bcm47xx_machine_halt;
} }
#ifdef CONFIG_BCM47XX_BCMA
static struct device * __init bcm47xx_setup_device(void)
{
struct device *dev;
int err;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return NULL;
err = dev_set_name(dev, "bcm47xx_soc");
if (err) {
pr_err("Failed to set SoC device name: %d\n", err);
kfree(dev);
return NULL;
}
err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (err)
pr_err("Failed to set SoC DMA mask: %d\n", err);
return dev;
}
#endif
/* /*
* This finishes bus initialization doing things that were not possible without * This finishes bus initialization doing things that were not possible without
* kmalloc. Make sure to call it late enough (after mm_init). * kmalloc. Make sure to call it late enough (after mm_init).
@ -183,6 +208,10 @@ void __init bcm47xx_bus_setup(void)
if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) { if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) {
int err; int err;
bcm47xx_bus.bcma.dev = bcm47xx_setup_device();
if (!bcm47xx_bus.bcma.dev)
panic("Failed to setup SoC device\n");
err = bcma_host_soc_init(&bcm47xx_bus.bcma); err = bcma_host_soc_init(&bcm47xx_bus.bcma);
if (err) if (err)
panic("Failed to initialize BCMA bus (err %d)", err); panic("Failed to initialize BCMA bus (err %d)", err);
@ -235,6 +264,8 @@ static int __init bcm47xx_register_bus_complete(void)
#endif #endif
#ifdef CONFIG_BCM47XX_BCMA #ifdef CONFIG_BCM47XX_BCMA
case BCM47XX_BUS_TYPE_BCMA: case BCM47XX_BUS_TYPE_BCMA:
if (device_register(bcm47xx_bus.bcma.dev))
pr_err("Failed to register SoC device\n");
bcma_bus_register(&bcm47xx_bus.bcma.bus); bcma_bus_register(&bcm47xx_bus.bcma.bus);
break; break;
#endif #endif


@ -98,7 +98,7 @@ static void octeon_kexec_smp_down(void *ignored)
" sync \n" " sync \n"
" synci ($0) \n"); " synci ($0) \n");
relocated_kexec_smp_wait(NULL); kexec_reboot();
} }
#endif #endif


@ -66,6 +66,7 @@ CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_PCI is not set # CONFIG_SERIAL_8250_PCI is not set
CONFIG_SERIAL_8250_NR_UARTS=1 CONFIG_SERIAL_8250_NR_UARTS=1
CONFIG_SERIAL_8250_RUNTIME_UARTS=1 CONFIG_SERIAL_8250_RUNTIME_UARTS=1
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_AR933X=y CONFIG_SERIAL_AR933X=y
CONFIG_SERIAL_AR933X_CONSOLE=y CONFIG_SERIAL_AR933X_CONSOLE=y
# CONFIG_HW_RANDOM is not set # CONFIG_HW_RANDOM is not set


@ -18,8 +18,6 @@
#define INT_NUM_EXTRA_START (INT_NUM_IM4_IRL0 + 32) #define INT_NUM_EXTRA_START (INT_NUM_IM4_IRL0 + 32)
#define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0) #define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0)
#define MIPS_CPU_TIMER_IRQ 7
#define MAX_IM 5 #define MAX_IM 5
#endif /* _FALCON_IRQ__ */ #endif /* _FALCON_IRQ__ */


@ -19,8 +19,6 @@
#define LTQ_DMA_CH0_INT (INT_NUM_IM2_IRL0) #define LTQ_DMA_CH0_INT (INT_NUM_IM2_IRL0)
#define MIPS_CPU_TIMER_IRQ 7
#define MAX_IM 5 #define MAX_IM 5
#endif #endif


@ -74,14 +74,15 @@ static int __init vdma_init(void)
get_order(VDMA_PGTBL_SIZE)); get_order(VDMA_PGTBL_SIZE));
BUG_ON(!pgtbl); BUG_ON(!pgtbl);
dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE); dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE);
pgtbl = (VDMA_PGTBL_ENTRY *)KSEG1ADDR(pgtbl); pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl);
/* /*
* Clear the R4030 translation table * Clear the R4030 translation table
*/ */
vdma_pgtbl_init(); vdma_pgtbl_init();
r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, CPHYSADDR(pgtbl)); r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE,
CPHYSADDR((unsigned long)pgtbl));
r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE); r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE);
r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0); r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);


@ -224,9 +224,11 @@ static struct irq_chip ltq_eiu_type = {
.irq_set_type = ltq_eiu_settype, .irq_set_type = ltq_eiu_settype,
}; };
static void ltq_hw_irqdispatch(int module) static void ltq_hw_irq_handler(struct irq_desc *desc)
{ {
int module = irq_desc_get_irq(desc) - 2;
u32 irq; u32 irq;
int hwirq;
irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR); irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR);
if (irq == 0) if (irq == 0)
@ -237,7 +239,8 @@ static void ltq_hw_irqdispatch(int module)
* other bits might be bogus * other bits might be bogus
*/ */
irq = __fls(irq); irq = __fls(irq);
do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module)); hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module);
generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));
/* if this is a EBU irq, we need to ack it or get a deadlock */ /* if this is a EBU irq, we need to ack it or get a deadlock */
if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT) if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
@ -245,49 +248,6 @@ static void ltq_hw_irqdispatch(int module)
LTQ_EBU_PCC_ISTAT); LTQ_EBU_PCC_ISTAT);
} }
#define DEFINE_HWx_IRQDISPATCH(x) \
static void ltq_hw ## x ## _irqdispatch(void) \
{ \
ltq_hw_irqdispatch(x); \
}
DEFINE_HWx_IRQDISPATCH(0)
DEFINE_HWx_IRQDISPATCH(1)
DEFINE_HWx_IRQDISPATCH(2)
DEFINE_HWx_IRQDISPATCH(3)
DEFINE_HWx_IRQDISPATCH(4)
#if MIPS_CPU_TIMER_IRQ == 7
static void ltq_hw5_irqdispatch(void)
{
do_IRQ(MIPS_CPU_TIMER_IRQ);
}
#else
DEFINE_HWx_IRQDISPATCH(5)
#endif
static void ltq_hw_irq_handler(struct irq_desc *desc)
{
ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2);
}
asmlinkage void plat_irq_dispatch(void)
{
unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
int irq;
if (!pending) {
spurious_interrupt();
return;
}
pending >>= CAUSEB_IP;
while (pending) {
irq = fls(pending) - 1;
do_IRQ(MIPS_CPU_IRQ_BASE + irq);
pending &= ~BIT(irq);
}
}
static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{ {
struct irq_chip *chip = &ltq_irq_type; struct irq_chip *chip = &ltq_irq_type;
@ -343,38 +303,13 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
for (i = 0; i < MAX_IM; i++) for (i = 0; i < MAX_IM; i++)
irq_set_chained_handler(i + 2, ltq_hw_irq_handler); irq_set_chained_handler(i + 2, ltq_hw_irq_handler);
if (cpu_has_vint) {
pr_info("Setting up vectored interrupts\n");
set_vi_handler(2, ltq_hw0_irqdispatch);
set_vi_handler(3, ltq_hw1_irqdispatch);
set_vi_handler(4, ltq_hw2_irqdispatch);
set_vi_handler(5, ltq_hw3_irqdispatch);
set_vi_handler(6, ltq_hw4_irqdispatch);
set_vi_handler(7, ltq_hw5_irqdispatch);
}
ltq_domain = irq_domain_add_linear(node, ltq_domain = irq_domain_add_linear(node,
(MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE, (MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
&irq_domain_ops, 0); &irq_domain_ops, 0);
#ifndef CONFIG_MIPS_MT_SMP
set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
#else
set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
#endif
/* tell oprofile which irq to use */ /* tell oprofile which irq to use */
ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ); ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);
/*
* if the timer irq is not one of the mips irqs we need to
* create a mapping
*/
if (MIPS_CPU_TIMER_IRQ != 7)
irq_create_mapping(ltq_domain, MIPS_CPU_TIMER_IRQ);
/* the external interrupts are optional and xway only */ /* the external interrupts are optional and xway only */
eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway"); eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) { if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
@ -411,7 +346,7 @@ EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
unsigned int get_c0_compare_int(void) unsigned int get_c0_compare_int(void)
{ {
return MIPS_CPU_TIMER_IRQ; return CP0_LEGACY_COMPARE_IRQ;
} }
static struct of_device_id __initdata of_irq_ids[] = { static struct of_device_id __initdata of_irq_ids[] = {


@ -369,7 +369,9 @@ int __init octeon_msi_initialize(void)
int irq; int irq;
struct irq_chip *msi; struct irq_chip *msi;
if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) { if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) {
return 0;
} else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0; msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0;
msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1; msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1;
msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2; msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2;


@ -3,9 +3,6 @@ OBJCOPYFLAGS := -O binary -R .note -R .note.gnu.build-id -R .comment -S
KBUILD_DEFCONFIG := defconfig KBUILD_DEFCONFIG := defconfig
comma = ,
ifdef CONFIG_FUNCTION_TRACER ifdef CONFIG_FUNCTION_TRACER
arch-y += -malways-save-lp -mno-relax arch-y += -malways-save-lp -mno-relax
endif endif
@ -54,8 +51,6 @@ endif
boot := arch/nds32/boot boot := arch/nds32/boot
core-y += $(boot)/dts/ core-y += $(boot)/dts/
.PHONY: FORCE
Image: vmlinux Image: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
@ -68,9 +63,6 @@ prepare: vdso_prepare
vdso_prepare: prepare0 vdso_prepare: prepare0
$(Q)$(MAKE) $(build)=arch/nds32/kernel/vdso include/generated/vdso-offsets.h $(Q)$(MAKE) $(build)=arch/nds32/kernel/vdso include/generated/vdso-offsets.h
CLEAN_FILES += include/asm-nds32/constants.h*
# We use MRPROPER_FILES and CLEAN_FILES now
archclean: archclean:
$(Q)$(MAKE) $(clean)=$(boot) $(Q)$(MAKE) $(clean)=$(boot)


@ -20,7 +20,6 @@
KBUILD_DEFCONFIG := or1ksim_defconfig KBUILD_DEFCONFIG := or1ksim_defconfig
OBJCOPYFLAGS := -O binary -R .note -R .comment -S OBJCOPYFLAGS := -O binary -R .note -R .comment -S
LDFLAGS_vmlinux :=
LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
KBUILD_CFLAGS += -pipe -ffixed-r10 -D__linux__ KBUILD_CFLAGS += -pipe -ffixed-r10 -D__linux__
@ -50,5 +49,3 @@ else
BUILTIN_DTB := n BUILTIN_DTB := n
endif endif
core-$(BUILTIN_DTB) += arch/openrisc/boot/dts/ core-$(BUILTIN_DTB) += arch/openrisc/boot/dts/
all: vmlinux


@ -47,6 +47,7 @@ enum perf_event_powerpc_regs {
PERF_REG_POWERPC_DAR, PERF_REG_POWERPC_DAR,
PERF_REG_POWERPC_DSISR, PERF_REG_POWERPC_DSISR,
PERF_REG_POWERPC_SIER, PERF_REG_POWERPC_SIER,
PERF_REG_POWERPC_MMCRA,
PERF_REG_POWERPC_MAX, PERF_REG_POWERPC_MAX,
}; };
#endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */ #endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */


@ -852,11 +852,12 @@ start_here:
/* set up the PTE pointers for the Abatron bdiGDB. /* set up the PTE pointers for the Abatron bdiGDB.
*/ */
tovirt(r6,r6)
lis r5, abatron_pteptrs@h lis r5, abatron_pteptrs@h
ori r5, r5, abatron_pteptrs@l ori r5, r5, abatron_pteptrs@l
stw r5, 0xf0(0) /* Must match your Abatron config file */ stw r5, 0xf0(0) /* Must match your Abatron config file */
tophys(r5,r5) tophys(r5,r5)
lis r6, swapper_pg_dir@h
ori r6, r6, swapper_pg_dir@l
stw r6, 0(r5) stw r6, 0(r5)
/* Now turn on the MMU for real! */ /* Now turn on the MMU for real! */


@@ -755,11 +755,12 @@ SYSCALL_DEFINE0(rt_sigreturn)
 		if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
 					   &uc_transact->uc_mcontext))
 			goto badframe;
-	}
+	} else
 #endif
-	/* Fall through, for non-TM restore */
-	if (!MSR_TM_ACTIVE(msr)) {
+	{
 		/*
+		 * Fall through, for non-TM restore
+		 *
 		 * Unset MSR[TS] on the thread regs since MSR from user
 		 * context does not have MSR active, and recheckpoint was
 		 * not called since restore_tm_sigcontexts() was not called


@ -967,13 +967,6 @@ out:
} }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64)
unsigned long __init arch_syscall_addr(int nr)
{
return sys_call_table[nr*2];
}
#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */
#ifdef PPC64_ELF_ABI_v1 #ifdef PPC64_ELF_ABI_v1
char *arch_ftrace_match_adjust(char *str, const char *search) char *arch_ftrace_match_adjust(char *str, const char *search)
{ {


@ -70,6 +70,7 @@ static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = {
PT_REGS_OFFSET(PERF_REG_POWERPC_DAR, dar), PT_REGS_OFFSET(PERF_REG_POWERPC_DAR, dar),
PT_REGS_OFFSET(PERF_REG_POWERPC_DSISR, dsisr), PT_REGS_OFFSET(PERF_REG_POWERPC_DSISR, dsisr),
PT_REGS_OFFSET(PERF_REG_POWERPC_SIER, dar), PT_REGS_OFFSET(PERF_REG_POWERPC_SIER, dar),
PT_REGS_OFFSET(PERF_REG_POWERPC_MMCRA, dsisr),
}; };
u64 perf_reg_value(struct pt_regs *regs, int idx) u64 perf_reg_value(struct pt_regs *regs, int idx)
@ -83,6 +84,11 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
!is_sier_available())) !is_sier_available()))
return 0; return 0;
if (idx == PERF_REG_POWERPC_MMCRA &&
(IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) ||
IS_ENABLED(CONFIG_PPC32)))
return 0;
return regs_get_register(regs, pt_regs_offset[idx]); return regs_get_register(regs, pt_regs_offset[idx]);
} }


@ -237,12 +237,12 @@ static int ocm_debugfs_show(struct seq_file *m, void *v)
continue; continue;
seq_printf(m, "PPC4XX OCM : %d\n", ocm->index); seq_printf(m, "PPC4XX OCM : %d\n", ocm->index);
seq_printf(m, "PhysAddr : %pa[p]\n", &(ocm->phys)); seq_printf(m, "PhysAddr : %pa\n", &(ocm->phys));
seq_printf(m, "MemTotal : %d Bytes\n", ocm->memtotal); seq_printf(m, "MemTotal : %d Bytes\n", ocm->memtotal);
seq_printf(m, "MemTotal(NC) : %d Bytes\n", ocm->nc.memtotal); seq_printf(m, "MemTotal(NC) : %d Bytes\n", ocm->nc.memtotal);
seq_printf(m, "MemTotal(C) : %d Bytes\n\n", ocm->c.memtotal); seq_printf(m, "MemTotal(C) : %d Bytes\n\n", ocm->c.memtotal);
seq_printf(m, "NC.PhysAddr : %pa[p]\n", &(ocm->nc.phys)); seq_printf(m, "NC.PhysAddr : %pa\n", &(ocm->nc.phys));
seq_printf(m, "NC.VirtAddr : 0x%p\n", ocm->nc.virt); seq_printf(m, "NC.VirtAddr : 0x%p\n", ocm->nc.virt);
seq_printf(m, "NC.MemTotal : %d Bytes\n", ocm->nc.memtotal); seq_printf(m, "NC.MemTotal : %d Bytes\n", ocm->nc.memtotal);
seq_printf(m, "NC.MemFree : %d Bytes\n", ocm->nc.memfree); seq_printf(m, "NC.MemFree : %d Bytes\n", ocm->nc.memfree);
@ -252,7 +252,7 @@ static int ocm_debugfs_show(struct seq_file *m, void *v)
blk->size, blk->owner); blk->size, blk->owner);
} }
seq_printf(m, "\nC.PhysAddr : %pa[p]\n", &(ocm->c.phys)); seq_printf(m, "\nC.PhysAddr : %pa\n", &(ocm->c.phys));
seq_printf(m, "C.VirtAddr : 0x%p\n", ocm->c.virt); seq_printf(m, "C.VirtAddr : 0x%p\n", ocm->c.virt);
seq_printf(m, "C.MemTotal : %d Bytes\n", ocm->c.memtotal); seq_printf(m, "C.MemTotal : %d Bytes\n", ocm->c.memtotal);
seq_printf(m, "C.MemFree : %d Bytes\n", ocm->c.memfree); seq_printf(m, "C.MemFree : %d Bytes\n", ocm->c.memfree);


@@ -538,8 +538,7 @@ static void __init chrp_init_IRQ(void)
 	/* see if there is a keyboard in the device tree
 	   with a parent of type "adb" */
 	for_each_node_by_name(kbd, "keyboard")
-		if (kbd->parent && kbd->parent->type
-		    && strcmp(kbd->parent->type, "adb") == 0)
+		if (of_node_is_type(kbd->parent, "adb"))
 			break;
 	of_node_put(kbd);
 	if (kbd)


@ -564,7 +564,7 @@ struct iommu_table_group *pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe)
} }
} else { } else {
/* Create a group for 1 GPU and attached NPUs for POWER8 */ /* Create a group for 1 GPU and attached NPUs for POWER8 */
pe->npucomp = kzalloc(sizeof(pe->npucomp), GFP_KERNEL); pe->npucomp = kzalloc(sizeof(*pe->npucomp), GFP_KERNEL);
table_group = &pe->npucomp->table_group; table_group = &pe->npucomp->table_group;
table_group->ops = &pnv_npu_peers_ops; table_group->ops = &pnv_npu_peers_ops;
iommu_register_group(table_group, hose->global_number, iommu_register_group(table_group, hose->global_number,


@ -2681,7 +2681,8 @@ static void pnv_pci_ioda_setup_iommu_api(void)
list_for_each_entry(hose, &hose_list, list_node) { list_for_each_entry(hose, &hose_list, list_node) {
phb = hose->private_data; phb = hose->private_data;
if (phb->type == PNV_PHB_NPU_NVLINK) if (phb->type == PNV_PHB_NPU_NVLINK ||
phb->type == PNV_PHB_NPU_OCAPI)
continue; continue;
list_for_each_entry(pe, &phb->ioda.pe_list, list) { list_for_each_entry(pe, &phb->ioda.pe_list, list) {


@ -264,7 +264,9 @@ void __init pSeries_final_fixup(void)
if (!of_device_is_compatible(nvdn->parent, if (!of_device_is_compatible(nvdn->parent,
"ibm,power9-npu")) "ibm,power9-npu"))
continue; continue;
#ifdef CONFIG_PPC_POWERNV
WARN_ON_ONCE(pnv_npu2_init(hose)); WARN_ON_ONCE(pnv_npu2_init(hose));
#endif
break; break;
} }
} }


@@ -25,7 +25,7 @@ static inline int init_new_context(struct task_struct *tsk,
 	atomic_set(&mm->context.flush_count, 0);
 	mm->context.gmap_asce = 0;
 	mm->context.flush_mm = 0;
-	mm->context.compat_mm = 0;
+	mm->context.compat_mm = test_thread_flag(TIF_31BIT);
 #ifdef CONFIG_PGSTE
 	mm->context.alloc_pgste = page_table_allocate_pgste ||
 		test_thread_flag(TIF_PGSTE) ||
@@ -90,8 +90,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
 	int cpu = smp_processor_id();

-	if (prev == next)
-		return;
 	S390_lowcore.user_asce = next->context.asce;
 	cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
 	/* Clear previous user-ASCE from CR1 and CR7 */
@@ -103,7 +101,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		__ctl_load(S390_lowcore.vdso_asce, 7, 7);
 		clear_cpu_flag(CIF_ASCE_SECONDARY);
 	}
-	cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
+	if (prev != next)
+		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
 }

 #define finish_arch_post_lock_switch finish_arch_post_lock_switch


@@ -63,10 +63,10 @@ static noinline __init void detect_machine_type(void)
 	if (stsi(vmms, 3, 2, 2) || !vmms->count)
 		return;

-	/* Running under KVM? If not we assume z/VM */
+	/* Detect known hypervisors */
 	if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
-	else
+	else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
 }
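A brief aside, not part of the patch: the CPI field compared above holds the control-program name in EBCDIC, so, as a hedged illustration (these identifiers are not defined anywhere in this diff), the two magic byte strings decode as follows.

/* Illustrative only: EBCDIC spellings of the control-program names */
static const unsigned char cpi_kvm[3] = { 0xd2, 0xe5, 0xd4 };		/* "KVM"  */
static const unsigned char cpi_zvm[4] = { 0xa9, 0x61, 0xe5, 0xd4 };	/* "z/VM" */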


@ -1006,6 +1006,8 @@ void __init setup_arch(char **cmdline_p)
pr_info("Linux is running under KVM in 64-bit mode\n"); pr_info("Linux is running under KVM in 64-bit mode\n");
else if (MACHINE_IS_LPAR) else if (MACHINE_IS_LPAR)
pr_info("Linux is running natively in 64-bit mode\n"); pr_info("Linux is running natively in 64-bit mode\n");
else
pr_info("Linux is running as a guest in 64-bit mode\n");
/* Have one command line that is parsed and saved in /proc/cmdline */ /* Have one command line that is parsed and saved in /proc/cmdline */
/* boot_command_line has been already set up in early.c */ /* boot_command_line has been already set up in early.c */


@ -381,8 +381,13 @@ void smp_call_online_cpu(void (*func)(void *), void *data)
*/ */
void smp_call_ipl_cpu(void (*func)(void *), void *data) void smp_call_ipl_cpu(void (*func)(void *), void *data)
{ {
struct lowcore *lc = pcpu_devices->lowcore;
if (pcpu_devices[0].address == stap())
lc = &S390_lowcore;
pcpu_delegate(&pcpu_devices[0], func, data, pcpu_delegate(&pcpu_devices[0], func, data,
pcpu_devices->lowcore->nodat_stack); lc->nodat_stack);
} }
int smp_find_processor_id(u16 address) int smp_find_processor_id(u16 address)
@ -1166,7 +1171,11 @@ static ssize_t __ref rescan_store(struct device *dev,
{ {
int rc; int rc;
rc = lock_device_hotplug_sysfs();
if (rc)
return rc;
rc = smp_rescan_cpus(); rc = smp_rescan_cpus();
unlock_device_hotplug();
return rc ? rc : count; return rc ? rc : count;
} }
static DEVICE_ATTR_WO(rescan); static DEVICE_ATTR_WO(rescan);


@@ -224,10 +224,9 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 		vdso_pages = vdso64_pages;
 #ifdef CONFIG_COMPAT
-	if (is_compat_task()) {
+	mm->context.compat_mm = is_compat_task();
+	if (mm->context.compat_mm)
 		vdso_pages = vdso32_pages;
-		mm->context.compat_mm = 1;
-	}
 #endif
 	/*
 	 * vDSO has a problem and was disabled, just don't "enable" it for


@ -198,7 +198,7 @@ config X86
select IRQ_FORCED_THREADING select IRQ_FORCED_THREADING
select NEED_SG_DMA_LENGTH select NEED_SG_DMA_LENGTH
select PCI_DOMAINS if PCI select PCI_DOMAINS if PCI
select PCI_LOCKLESS_CONFIG select PCI_LOCKLESS_CONFIG if PCI
select PERF_EVENTS select PERF_EVENTS
select RTC_LIB select RTC_LIB
select RTC_MC146818_LIB select RTC_MC146818_LIB
@ -617,7 +617,7 @@ config X86_INTEL_QUARK
config X86_INTEL_LPSS config X86_INTEL_LPSS
bool "Intel Low Power Subsystem Support" bool "Intel Low Power Subsystem Support"
depends on X86 && ACPI depends on X86 && ACPI && PCI
select COMMON_CLK select COMMON_CLK
select PINCTRL select PINCTRL
select IOSF_MBI select IOSF_MBI


@ -361,7 +361,8 @@ ENTRY(entry_INT80_compat)
/* Need to switch before accessing the thread stack. */ /* Need to switch before accessing the thread stack. */
SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
movq %rsp, %rdi /* In the Xen PV case we already run on the thread stack. */
ALTERNATIVE "movq %rsp, %rdi", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
pushq 6*8(%rdi) /* regs->ss */ pushq 6*8(%rdi) /* regs->ss */
@ -370,8 +371,9 @@ ENTRY(entry_INT80_compat)
pushq 3*8(%rdi) /* regs->cs */ pushq 3*8(%rdi) /* regs->cs */
pushq 2*8(%rdi) /* regs->ip */ pushq 2*8(%rdi) /* regs->ip */
pushq 1*8(%rdi) /* regs->orig_ax */ pushq 1*8(%rdi) /* regs->orig_ax */
pushq (%rdi) /* pt_regs->di */ pushq (%rdi) /* pt_regs->di */
.Lint80_keep_stack:
pushq %rsi /* pt_regs->si */ pushq %rsi /* pt_regs->si */
xorl %esi, %esi /* nospec si */ xorl %esi, %esi /* nospec si */
pushq %rdx /* pt_regs->dx */ pushq %rdx /* pt_regs->dx */


@ -178,6 +178,10 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk); void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
/*
* Init a new mm. Used on mm copies, like at fork()
* and on mm's that are brand-new, like at execve().
*/
static inline int init_new_context(struct task_struct *tsk, static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm) struct mm_struct *mm)
{ {
@ -228,8 +232,22 @@ do { \
} while (0) } while (0)
#endif #endif
static inline void arch_dup_pkeys(struct mm_struct *oldmm,
struct mm_struct *mm)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
return;
/* Duplicate the oldmm pkey state in mm: */
mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
#endif
}
static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{ {
arch_dup_pkeys(oldmm, mm);
paravirt_arch_dup_mmap(oldmm, mm); paravirt_arch_dup_mmap(oldmm, mm);
return ldt_dup_context(oldmm, mm); return ldt_dup_context(oldmm, mm);
} }
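For context (not part of this patch): duplicating the pkey allocation map at fork() is what keeps a protection key allocated in the parent usable in the child. A minimal user-space sketch, assuming pkeys-capable hardware and glibc's pkey_* wrappers:

#define _GNU_SOURCE
#include <sys/mman.h>
#include <sys/wait.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	int pkey = pkey_alloc(0, 0);	/* no access restrictions */

	if (p == MAP_FAILED || pkey < 0) {
		perror("setup (pkeys unsupported?)");
		return 1;
	}
	if (pkey_mprotect(p, 4096, PROT_READ | PROT_WRITE, pkey)) {
		perror("pkey_mprotect");
		return 1;
	}
	strcpy(p, "hello");

	if (fork() == 0) {
		/* child: the pkey allocation map is copied at fork(),
		 * so the key tagged on this mapping stays valid here */
		printf("child reads: %s\n", p);
		_exit(0);
	}
	wait(NULL);
	pkey_free(pkey);
	return 0;
}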


@ -711,7 +711,7 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t
{ {
if (unlikely(!access_ok(ptr,len))) if (unlikely(!access_ok(ptr,len)))
return 0; return 0;
__uaccess_begin(); __uaccess_begin_nospec();
return 1; return 1;
} }
#define user_access_begin(a,b) user_access_begin(a,b) #define user_access_begin(a,b) user_access_begin(a,b)


@ -470,6 +470,7 @@ int crash_load_segments(struct kimage *image)
kbuf.memsz = kbuf.bufsz; kbuf.memsz = kbuf.bufsz;
kbuf.buf_align = ELF_CORE_HEADER_ALIGN; kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
ret = kexec_add_buffer(&kbuf); ret = kexec_add_buffer(&kbuf);
if (ret) { if (ret) {
vfree((void *)image->arch.elf_headers); vfree((void *)image->arch.elf_headers);


@ -21,10 +21,6 @@
#define HPET_MASK CLOCKSOURCE_MASK(32) #define HPET_MASK CLOCKSOURCE_MASK(32)
/* FSEC = 10^-15
NSEC = 10^-9 */
#define FSEC_PER_NSEC 1000000L
#define HPET_DEV_USED_BIT 2 #define HPET_DEV_USED_BIT 2
#define HPET_DEV_USED (1 << HPET_DEV_USED_BIT) #define HPET_DEV_USED (1 << HPET_DEV_USED_BIT)
#define HPET_DEV_VALID 0x8 #define HPET_DEV_VALID 0x8


@ -434,6 +434,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
kbuf.memsz = PAGE_ALIGN(header->init_size); kbuf.memsz = PAGE_ALIGN(header->init_size);
kbuf.buf_align = header->kernel_alignment; kbuf.buf_align = header->kernel_alignment;
kbuf.buf_min = MIN_KERNEL_LOAD_ADDR; kbuf.buf_min = MIN_KERNEL_LOAD_ADDR;
kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
ret = kexec_add_buffer(&kbuf); ret = kexec_add_buffer(&kbuf);
if (ret) if (ret)
goto out_free_params; goto out_free_params;
@ -448,6 +449,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
kbuf.bufsz = kbuf.memsz = initrd_len; kbuf.bufsz = kbuf.memsz = initrd_len;
kbuf.buf_align = PAGE_SIZE; kbuf.buf_align = PAGE_SIZE;
kbuf.buf_min = MIN_INITRD_LOAD_ADDR; kbuf.buf_min = MIN_INITRD_LOAD_ADDR;
kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
ret = kexec_add_buffer(&kbuf); ret = kexec_add_buffer(&kbuf);
if (ret) if (ret)
goto out_free_params; goto out_free_params;


@ -457,6 +457,7 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
#else #else
u64 ipi_bitmap = 0; u64 ipi_bitmap = 0;
#endif #endif
long ret;
if (cpumask_empty(mask)) if (cpumask_empty(mask))
return; return;
@ -482,8 +483,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
} else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) { } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
max = apic_id < max ? max : apic_id; max = apic_id < max ? max : apic_id;
} else { } else {
kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr); (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
min = max = apic_id; min = max = apic_id;
ipi_bitmap = 0; ipi_bitmap = 0;
} }
@ -491,8 +493,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
} }
if (ipi_bitmap) { if (ipi_bitmap) {
kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr); (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
} }
local_irq_restore(flags); local_irq_restore(flags);


@ -297,15 +297,16 @@ static int __init tsc_setup(char *str)
__setup("tsc=", tsc_setup); __setup("tsc=", tsc_setup);
#define MAX_RETRIES 5 #define MAX_RETRIES 5
#define SMI_TRESHOLD 50000 #define TSC_DEFAULT_THRESHOLD 0x20000
/* /*
* Read TSC and the reference counters. Take care of SMI disturbance * Read TSC and the reference counters. Take care of any disturbances
*/ */
static u64 tsc_read_refs(u64 *p, int hpet) static u64 tsc_read_refs(u64 *p, int hpet)
{ {
u64 t1, t2; u64 t1, t2;
u64 thresh = tsc_khz ? tsc_khz >> 5 : TSC_DEFAULT_THRESHOLD;
int i; int i;
for (i = 0; i < MAX_RETRIES; i++) { for (i = 0; i < MAX_RETRIES; i++) {
@ -315,7 +316,7 @@ static u64 tsc_read_refs(u64 *p, int hpet)
else else
*p = acpi_pm_read_early(); *p = acpi_pm_read_early();
t2 = get_cycles(); t2 = get_cycles();
if ((t2 - t1) < SMI_TRESHOLD) if ((t2 - t1) < thresh)
return t2; return t2;
} }
return ULLONG_MAX; return ULLONG_MAX;
@ -703,15 +704,15 @@ static unsigned long pit_hpet_ptimer_calibrate_cpu(void)
* zero. In each wait loop iteration we read the TSC and check * zero. In each wait loop iteration we read the TSC and check
* the delta to the previous read. We keep track of the min * the delta to the previous read. We keep track of the min
* and max values of that delta. The delta is mostly defined * and max values of that delta. The delta is mostly defined
* by the IO time of the PIT access, so we can detect when a * by the IO time of the PIT access, so we can detect when
* SMI/SMM disturbance happened between the two reads. If the * any disturbance happened between the two reads. If the
* maximum time is significantly larger than the minimum time, * maximum time is significantly larger than the minimum time,
* then we discard the result and have another try. * then we discard the result and have another try.
* *
* 2) Reference counter. If available we use the HPET or the * 2) Reference counter. If available we use the HPET or the
* PMTIMER as a reference to check the sanity of that value. * PMTIMER as a reference to check the sanity of that value.
* We use separate TSC readouts and check inside of the * We use separate TSC readouts and check inside of the
* reference read for a SMI/SMM disturbance. We discard * reference read for any possible disturbance. We discard
* disturbed values here as well. We do that around the PIT * disturbed values here as well. We do that around the PIT
* calibration delay loop as we have to wait for a certain * calibration delay loop as we have to wait for a certain
* amount of time anyway. * amount of time anyway.
@ -744,7 +745,7 @@ static unsigned long pit_hpet_ptimer_calibrate_cpu(void)
if (ref1 == ref2) if (ref1 == ref2)
continue; continue;
/* Check, whether the sampling was disturbed by an SMI */ /* Check, whether the sampling was disturbed */
if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX) if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
continue; continue;
@ -1268,7 +1269,7 @@ static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
*/ */
static void tsc_refine_calibration_work(struct work_struct *work) static void tsc_refine_calibration_work(struct work_struct *work)
{ {
static u64 tsc_start = -1, ref_start; static u64 tsc_start = ULLONG_MAX, ref_start;
static int hpet; static int hpet;
u64 tsc_stop, ref_stop, delta; u64 tsc_stop, ref_stop, delta;
unsigned long freq; unsigned long freq;
@ -1283,14 +1284,15 @@ static void tsc_refine_calibration_work(struct work_struct *work)
* delayed the first time we expire. So set the workqueue * delayed the first time we expire. So set the workqueue
* again once we know timers are working. * again once we know timers are working.
*/ */
if (tsc_start == -1) { if (tsc_start == ULLONG_MAX) {
restart:
/* /*
* Only set hpet once, to avoid mixing hardware * Only set hpet once, to avoid mixing hardware
* if the hpet becomes enabled later. * if the hpet becomes enabled later.
*/ */
hpet = is_hpet_enabled(); hpet = is_hpet_enabled();
schedule_delayed_work(&tsc_irqwork, HZ);
tsc_start = tsc_read_refs(&ref_start, hpet); tsc_start = tsc_read_refs(&ref_start, hpet);
schedule_delayed_work(&tsc_irqwork, HZ);
return; return;
} }
@ -1300,9 +1302,9 @@ static void tsc_refine_calibration_work(struct work_struct *work)
if (ref_start == ref_stop) if (ref_start == ref_stop)
goto out; goto out;
/* Check, whether the sampling was disturbed by an SMI */ /* Check, whether the sampling was disturbed */
if (tsc_start == ULLONG_MAX || tsc_stop == ULLONG_MAX) if (tsc_stop == ULLONG_MAX)
goto out; goto restart;
delta = tsc_stop - tsc_start; delta = tsc_stop - tsc_start;
delta *= 1000000LL; delta *= 1000000LL;


@ -2,10 +2,6 @@
ccflags-y += -Iarch/x86/kvm ccflags-y += -Iarch/x86/kvm
CFLAGS_x86.o := -I.
CFLAGS_svm.o := -I.
CFLAGS_vmx.o := -I.
KVM := ../../../virt/kvm KVM := ../../../virt/kvm
kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \ kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \


@ -1636,7 +1636,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
ret = kvm_hvcall_signal_event(vcpu, fast, ingpa); ret = kvm_hvcall_signal_event(vcpu, fast, ingpa);
if (ret != HV_STATUS_INVALID_PORT_ID) if (ret != HV_STATUS_INVALID_PORT_ID)
break; break;
/* maybe userspace knows this conn_id: fall through */ /* fall through - maybe userspace knows this conn_id. */
case HVCALL_POST_MESSAGE: case HVCALL_POST_MESSAGE:
/* don't bother userspace if it has no way to handle it */ /* don't bother userspace if it has no way to handle it */
if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) { if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) {
@ -1832,7 +1832,6 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
ent->eax |= HV_X64_MSR_VP_INDEX_AVAILABLE; ent->eax |= HV_X64_MSR_VP_INDEX_AVAILABLE;
ent->eax |= HV_X64_MSR_RESET_AVAILABLE; ent->eax |= HV_X64_MSR_RESET_AVAILABLE;
ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE; ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
ent->eax |= HV_X64_MSR_GUEST_IDLE_AVAILABLE;
ent->eax |= HV_X64_ACCESS_FREQUENCY_MSRS; ent->eax |= HV_X64_ACCESS_FREQUENCY_MSRS;
ent->eax |= HV_X64_ACCESS_REENLIGHTENMENT; ent->eax |= HV_X64_ACCESS_REENLIGHTENMENT;
@ -1848,11 +1847,11 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
case HYPERV_CPUID_ENLIGHTMENT_INFO: case HYPERV_CPUID_ENLIGHTMENT_INFO:
ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED; ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED; ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
ent->eax |= HV_X64_SYSTEM_RESET_RECOMMENDED;
ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED; ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED; ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED; ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED; if (evmcs_ver)
ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
/* /*
* Default number of spinlock retry attempts, matches * Default number of spinlock retry attempts, matches


@ -1035,6 +1035,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
switch (delivery_mode) { switch (delivery_mode) {
case APIC_DM_LOWEST: case APIC_DM_LOWEST:
vcpu->arch.apic_arb_prio++; vcpu->arch.apic_arb_prio++;
/* fall through */
case APIC_DM_FIXED: case APIC_DM_FIXED:
if (unlikely(trig_mode && !level)) if (unlikely(trig_mode && !level))
break; break;
@ -1874,6 +1875,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
case APIC_LVT0: case APIC_LVT0:
apic_manage_nmi_watchdog(apic, val); apic_manage_nmi_watchdog(apic, val);
/* fall through */
case APIC_LVTTHMR: case APIC_LVTTHMR:
case APIC_LVTPC: case APIC_LVTPC:
case APIC_LVT1: case APIC_LVT1:


@ -4371,6 +4371,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
rsvd_bits(maxphyaddr, 51); rsvd_bits(maxphyaddr, 51);
rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[1][4] =
rsvd_check->rsvd_bits_mask[0][4]; rsvd_check->rsvd_bits_mask[0][4];
/* fall through */
case PT64_ROOT_4LEVEL: case PT64_ROOT_4LEVEL:
rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd | rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd |
nonleaf_bit8_rsvd | rsvd_bits(7, 7) | nonleaf_bit8_rsvd | rsvd_bits(7, 7) |


@ -3414,6 +3414,14 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
kvm_mmu_reset_context(&svm->vcpu); kvm_mmu_reset_context(&svm->vcpu);
kvm_mmu_load(&svm->vcpu); kvm_mmu_load(&svm->vcpu);
/*
* Drop what we picked up for L2 via svm_complete_interrupts() so it
* doesn't end up in L1.
*/
svm->vcpu.arch.nmi_injected = false;
kvm_clear_exception_queue(&svm->vcpu);
kvm_clear_interrupt_queue(&svm->vcpu);
return 0; return 0;
} }
@ -4395,7 +4403,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
case MSR_IA32_APICBASE: case MSR_IA32_APICBASE:
if (kvm_vcpu_apicv_active(vcpu)) if (kvm_vcpu_apicv_active(vcpu))
avic_update_vapic_bar(to_svm(vcpu), data); avic_update_vapic_bar(to_svm(vcpu), data);
- /* Follow through */
+ /* Fall through */
default: default:
return kvm_set_msr_common(vcpu, msr); return kvm_set_msr_common(vcpu, msr);
} }
@ -4504,28 +4512,19 @@ static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
kvm_lapic_reg_write(apic, APIC_ICR, icrl); kvm_lapic_reg_write(apic, APIC_ICR, icrl);
break; break;
case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: { case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
- int i;
- struct kvm_vcpu *vcpu;
- struct kvm *kvm = svm->vcpu.kvm;
struct kvm_lapic *apic = svm->vcpu.arch.apic;
- /*
-  * At this point, we expect that the AVIC HW has already
-  * set the appropriate IRR bits on the valid target
-  * vcpus. So, we just need to kick the appropriate vcpu.
-  */
- kvm_for_each_vcpu(i, vcpu, kvm) {
- bool m = kvm_apic_match_dest(vcpu, apic,
- icrl & KVM_APIC_SHORT_MASK,
- GET_APIC_DEST_FIELD(icrh),
- icrl & KVM_APIC_DEST_MASK);
- if (m && !avic_vcpu_is_running(vcpu))
- kvm_vcpu_wake_up(vcpu);
- }
+ /*
+  * Update ICR high and low, then emulate sending IPI,
+  * which is handled when writing APIC_ICR.
+  */
+ kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
+ kvm_lapic_reg_write(apic, APIC_ICR, icrl);
break; break;
} }
case AVIC_IPI_FAILURE_INVALID_TARGET: case AVIC_IPI_FAILURE_INVALID_TARGET:
WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n",
index, svm->vcpu.vcpu_id, icrh, icrl);
break; break;
case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE: case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
WARN_ONCE(1, "Invalid backing page\n"); WARN_ONCE(1, "Invalid backing page\n");


@ -1465,7 +1465,7 @@ TRACE_EVENT(kvm_hv_send_ipi_ex,
#endif /* _TRACE_KVM_H */ #endif /* _TRACE_KVM_H */
#undef TRACE_INCLUDE_PATH #undef TRACE_INCLUDE_PATH
- #define TRACE_INCLUDE_PATH arch/x86/kvm
+ #define TRACE_INCLUDE_PATH ../../arch/x86/kvm
#undef TRACE_INCLUDE_FILE #undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace #define TRACE_INCLUDE_FILE trace


@ -332,16 +332,17 @@ int nested_enable_evmcs(struct kvm_vcpu *vcpu,
uint16_t *vmcs_version) uint16_t *vmcs_version)
{ {
struct vcpu_vmx *vmx = to_vmx(vcpu); struct vcpu_vmx *vmx = to_vmx(vcpu);
bool evmcs_already_enabled = vmx->nested.enlightened_vmcs_enabled;
vmx->nested.enlightened_vmcs_enabled = true;
if (vmcs_version) if (vmcs_version)
*vmcs_version = nested_get_evmcs_version(vcpu); *vmcs_version = nested_get_evmcs_version(vcpu);
/* We don't support disabling the feature for simplicity. */ /* We don't support disabling the feature for simplicity. */
- if (vmx->nested.enlightened_vmcs_enabled)
+ if (evmcs_already_enabled)
return 0; return 0;
vmx->nested.enlightened_vmcs_enabled = true;
vmx->nested.msrs.pinbased_ctls_high &= ~EVMCS1_UNSUPPORTED_PINCTRL; vmx->nested.msrs.pinbased_ctls_high &= ~EVMCS1_UNSUPPORTED_PINCTRL;
vmx->nested.msrs.entry_ctls_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL; vmx->nested.msrs.entry_ctls_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
vmx->nested.msrs.exit_ctls_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL; vmx->nested.msrs.exit_ctls_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
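The reordering in nested_enable_evmcs() samples the old enlightened_vmcs_enabled value before setting it, which keeps the call idempotent: the VMCS version is reported every time, while the control-field filtering runs only on the first enable. A generic sketch of that capture-then-set pattern (the struct and names are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct feature {
	bool enabled;		/* one-way flag */
	unsigned int ctrls;	/* capabilities filtered on first enable */
};

static int enable_feature(struct feature *f, unsigned int *version)
{
	bool already_enabled = f->enabled;	/* capture before setting */

	f->enabled = true;

	if (version)
		*version = 1;			/* reported on every call */

	if (already_enabled)
		return 0;			/* side effects already applied */

	f->ctrls &= ~0x4u;			/* first enable only */
	return 0;
}

int main(void)
{
	struct feature f = { .enabled = false, .ctrls = 0x7 };
	unsigned int ver;

	enable_feature(&f, &ver);
	enable_feature(&f, NULL);	/* second call leaves f.ctrls alone */
	printf("ctrls=%#x ver=%u\n", f.ctrls, ver);
	return 0;
}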


@ -55,7 +55,7 @@ static u16 shadow_read_write_fields[] = {
static int max_shadow_read_write_fields = static int max_shadow_read_write_fields =
ARRAY_SIZE(shadow_read_write_fields); ARRAY_SIZE(shadow_read_write_fields);
- void init_vmcs_shadow_fields(void)
+ static void init_vmcs_shadow_fields(void)
{ {
int i, j; int i, j;
@ -4140,11 +4140,11 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
if (r < 0) if (r < 0)
goto out_vmcs02; goto out_vmcs02;
- vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
+ vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
if (!vmx->nested.cached_vmcs12) if (!vmx->nested.cached_vmcs12)
goto out_cached_vmcs12; goto out_cached_vmcs12;
- vmx->nested.cached_shadow_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
+ vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
if (!vmx->nested.cached_shadow_vmcs12) if (!vmx->nested.cached_shadow_vmcs12)
goto out_cached_shadow_vmcs12; goto out_cached_shadow_vmcs12;
@ -5263,13 +5263,17 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
copy_shadow_to_vmcs12(vmx); copy_shadow_to_vmcs12(vmx);
} }
- if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12)))
+ /*
+  * Copy over the full allocated size of vmcs12 rather than just the size
+  * of the struct.
+  */
+ if (copy_to_user(user_kvm_nested_state->data, vmcs12, VMCS12_SIZE))
return -EFAULT; return -EFAULT;
if (nested_cpu_has_shadow_vmcs(vmcs12) && if (nested_cpu_has_shadow_vmcs(vmcs12) &&
vmcs12->vmcs_link_pointer != -1ull) { vmcs12->vmcs_link_pointer != -1ull) {
if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE, if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE,
- get_shadow_vmcs12(vcpu), sizeof(*vmcs12)))
+ get_shadow_vmcs12(vcpu), VMCS12_SIZE))
return -EFAULT; return -EFAULT;
} }
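This hunk pairs two defensive changes: the vmcs12 buffers are zero-filled at allocation (kzalloc instead of kmalloc) and the full allocated VMCS12_SIZE is copied to userspace, so the tail of the allocation can never leak stale kernel memory. A user-space analogy of the same rule, with calloc and memcpy standing in for kzalloc and copy_to_user:

#include <stdlib.h>
#include <string.h>

#define BLOB_SIZE 4096			/* full allocated size, larger than the struct */

struct state { int a, b; };		/* the part that actually gets filled in */

/* The whole allocation is exported, so it must not hold uninitialized bytes. */
static int export_state(const struct state *s, unsigned char *out)
{
	unsigned char *blob = calloc(1, BLOB_SIZE);	/* zeroed, unlike malloc */

	if (!blob)
		return -1;

	memcpy(blob, s, sizeof(*s));
	memcpy(out, blob, BLOB_SIZE);	/* copy the full size, like VMCS12_SIZE */
	free(blob);
	return 0;
}

int main(void)
{
	static unsigned char out[BLOB_SIZE];
	struct state s = { 1, 2 };

	return export_state(&s, out);
}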


@ -423,7 +423,7 @@ static void check_ept_pointer_match(struct kvm *kvm)
to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH; to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH;
} }
- int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
+ static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
void *data) void *data)
{ {
struct kvm_tlb_range *range = data; struct kvm_tlb_range *range = data;
@ -1773,7 +1773,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!msr_info->host_initiated && if (!msr_info->host_initiated &&
!guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP)) !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
return 1; return 1;
- /* Otherwise falls through */
+ /* Else, falls through */
default: default:
msr = find_msr_entry(vmx, msr_info->index); msr = find_msr_entry(vmx, msr_info->index);
if (msr) { if (msr) {
@ -2014,7 +2014,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
/* Check reserved bit, higher 32 bits should be zero */ /* Check reserved bit, higher 32 bits should be zero */
if ((data >> 32) != 0) if ((data >> 32) != 0)
return 1; return 1;
- /* Otherwise falls through */
+ /* Else, falls through */
default: default:
msr = find_msr_entry(vmx, msr_index); msr = find_msr_entry(vmx, msr_index);
if (msr) { if (msr) {
@ -2344,7 +2344,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
case 37: /* AAT100 */ case 37: /* AAT100 */
case 44: /* BC86,AAY89,BD102 */ case 44: /* BC86,AAY89,BD102 */
case 46: /* BA97 */ case 46: /* BA97 */
- _vmexit_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+ _vmentry_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
_vmexit_control &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; _vmexit_control &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
pr_warn_once("kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL " pr_warn_once("kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
"does not work properly. Using workaround\n"); "does not work properly. Using workaround\n");
@ -6362,72 +6362,9 @@ static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
vmx->loaded_vmcs->hv_timer_armed = false; vmx->loaded_vmcs->hv_timer_armed = false;
} }
- static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- unsigned long cr3, cr4, evmcs_rsp;
+ unsigned long evmcs_rsp;
/* Record the guest's net vcpu time for enforced NMI injections. */
if (unlikely(!enable_vnmi &&
vmx->loaded_vmcs->soft_vnmi_blocked))
vmx->loaded_vmcs->entry_time = ktime_get();
/* Don't enter VMX if guest state is invalid, let the exit handler
start emulation until we arrive back to a valid state */
if (vmx->emulation_required)
return;
if (vmx->ple_window_dirty) {
vmx->ple_window_dirty = false;
vmcs_write32(PLE_WINDOW, vmx->ple_window);
}
if (vmx->nested.need_vmcs12_sync)
nested_sync_from_vmcs12(vcpu);
if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
cr3 = __get_current_cr3_fast();
if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
vmcs_writel(HOST_CR3, cr3);
vmx->loaded_vmcs->host_state.cr3 = cr3;
}
cr4 = cr4_read_shadow();
if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
vmcs_writel(HOST_CR4, cr4);
vmx->loaded_vmcs->host_state.cr4 = cr4;
}
/* When single-stepping over STI and MOV SS, we must clear the
* corresponding interruptibility bits in the guest state. Otherwise
* vmentry fails as it then expects bit 14 (BS) in pending debug
* exceptions being set, but that's not correct for the guest debugging
* case. */
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
vmx_set_interrupt_shadow(vcpu, 0);
if (static_cpu_has(X86_FEATURE_PKU) &&
kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
vcpu->arch.pkru != vmx->host_pkru)
__write_pkru(vcpu->arch.pkru);
pt_guest_enter(vmx);
atomic_switch_perf_msrs(vmx);
vmx_update_hv_timer(vcpu);
/*
* If this vCPU has touched SPEC_CTRL, restore the guest's value if
* it's non-zero. Since vmentry is serialising on affected CPUs, there
* is no need to worry about the conditional branch over the wrmsr
* being speculatively taken.
*/
x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
vmx->__launched = vmx->loaded_vmcs->launched; vmx->__launched = vmx->loaded_vmcs->launched;
@ -6567,6 +6504,77 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
, "eax", "ebx", "edi" , "eax", "ebx", "edi"
#endif #endif
); );
}
STACK_FRAME_NON_STANDARD(__vmx_vcpu_run);
static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long cr3, cr4;
/* Record the guest's net vcpu time for enforced NMI injections. */
if (unlikely(!enable_vnmi &&
vmx->loaded_vmcs->soft_vnmi_blocked))
vmx->loaded_vmcs->entry_time = ktime_get();
/* Don't enter VMX if guest state is invalid, let the exit handler
start emulation until we arrive back to a valid state */
if (vmx->emulation_required)
return;
if (vmx->ple_window_dirty) {
vmx->ple_window_dirty = false;
vmcs_write32(PLE_WINDOW, vmx->ple_window);
}
if (vmx->nested.need_vmcs12_sync)
nested_sync_from_vmcs12(vcpu);
if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
cr3 = __get_current_cr3_fast();
if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
vmcs_writel(HOST_CR3, cr3);
vmx->loaded_vmcs->host_state.cr3 = cr3;
}
cr4 = cr4_read_shadow();
if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
vmcs_writel(HOST_CR4, cr4);
vmx->loaded_vmcs->host_state.cr4 = cr4;
}
/* When single-stepping over STI and MOV SS, we must clear the
* corresponding interruptibility bits in the guest state. Otherwise
* vmentry fails as it then expects bit 14 (BS) in pending debug
* exceptions being set, but that's not correct for the guest debugging
* case. */
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
vmx_set_interrupt_shadow(vcpu, 0);
if (static_cpu_has(X86_FEATURE_PKU) &&
kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
vcpu->arch.pkru != vmx->host_pkru)
__write_pkru(vcpu->arch.pkru);
pt_guest_enter(vmx);
atomic_switch_perf_msrs(vmx);
vmx_update_hv_timer(vcpu);
/*
* If this vCPU has touched SPEC_CTRL, restore the guest's value if
* it's non-zero. Since vmentry is serialising on affected CPUs, there
* is no need to worry about the conditional branch over the wrmsr
* being speculatively taken.
*/
x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
__vmx_vcpu_run(vcpu, vmx);
/* /*
* We do not use IBRS in the kernel. If this vCPU has used the * We do not use IBRS in the kernel. If this vCPU has used the
@ -6648,7 +6656,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmx_recover_nmi_blocking(vmx); vmx_recover_nmi_blocking(vmx);
vmx_complete_interrupts(vmx); vmx_complete_interrupts(vmx);
} }
STACK_FRAME_NON_STANDARD(vmx_vcpu_run);
static struct kvm *vmx_vm_alloc(void) static struct kvm *vmx_vm_alloc(void)
{ {


@ -3834,6 +3834,8 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
case KVM_CAP_HYPERV_SYNIC2: case KVM_CAP_HYPERV_SYNIC2:
if (cap->args[0]) if (cap->args[0])
return -EINVAL; return -EINVAL;
/* fall through */
case KVM_CAP_HYPERV_SYNIC: case KVM_CAP_HYPERV_SYNIC:
if (!irqchip_in_kernel(vcpu->kvm)) if (!irqchip_in_kernel(vcpu->kvm))
return -EINVAL; return -EINVAL;
@ -6480,8 +6482,7 @@ restart:
toggle_interruptibility(vcpu, ctxt->interruptibility); toggle_interruptibility(vcpu, ctxt->interruptibility);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false; vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
kvm_rip_write(vcpu, ctxt->eip); kvm_rip_write(vcpu, ctxt->eip);
- if (r == EMULATE_DONE &&
- (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
+ if (r == EMULATE_DONE && ctxt->tf)
kvm_vcpu_do_singlestep(vcpu, &r); kvm_vcpu_do_singlestep(vcpu, &r);
if (!ctxt->have_exception || if (!ctxt->have_exception ||
exception_type(ctxt->exception.vector) == EXCPT_TRAP) exception_type(ctxt->exception.vector) == EXCPT_TRAP)
@ -7093,10 +7094,10 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
case KVM_HC_CLOCK_PAIRING: case KVM_HC_CLOCK_PAIRING:
ret = kvm_pv_clock_pairing(vcpu, a0, a1); ret = kvm_pv_clock_pairing(vcpu, a0, a1);
break; break;
#endif
case KVM_HC_SEND_IPI: case KVM_HC_SEND_IPI:
ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit); ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit);
break; break;
#endif
default: default:
ret = -KVM_ENOSYS; ret = -KVM_ENOSYS;
break; break;
@ -7937,6 +7938,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
vcpu->arch.pv.pv_unhalted = false; vcpu->arch.pv.pv_unhalted = false;
vcpu->arch.mp_state = vcpu->arch.mp_state =
KVM_MP_STATE_RUNNABLE; KVM_MP_STATE_RUNNABLE;
/* fall through */
case KVM_MP_STATE_RUNNABLE: case KVM_MP_STATE_RUNNABLE:
vcpu->arch.apf.halted = false; vcpu->arch.apf.halted = false;
break; break;


@ -36,8 +36,8 @@ static inline u16 i8254(void)
u16 status, timer; u16 status, timer;
do { do {
- outb(I8254_PORT_CONTROL,
- I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
+ outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0,
+ I8254_PORT_CONTROL);
status = inb(I8254_PORT_COUNTER0); status = inb(I8254_PORT_COUNTER0);
timer = inb(I8254_PORT_COUNTER0); timer = inb(I8254_PORT_COUNTER0);
timer |= inb(I8254_PORT_COUNTER0) << 8; timer |= inb(I8254_PORT_COUNTER0) << 8;
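In the i8254 hunk the outb() operands are swapped into the right order: on x86 Linux the helper takes the value first and the I/O port second, so the old call was writing the port number as the command byte. A tiny user-space mock of the intended (value, port) ordering (mock_outb and the constant values are illustrative):

#include <stdint.h>
#include <stdio.h>

#define I8254_PORT_CONTROL	0x43
#define I8254_CMD_READBACK	0xc0
#define I8254_SELECT_COUNTER0	0x02

/* Mock of the x86 helper: value first, port second. */
static void mock_outb(uint8_t value, uint16_t port)
{
	printf("out %#04x -> port %#06x\n", (unsigned)value, (unsigned)port);
}

int main(void)
{
	/* Correct: the readback command is the data, 0x43 is the port. */
	mock_outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0, I8254_PORT_CONTROL);
	return 0;
}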


@ -158,8 +158,8 @@ static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
pmd = pmd_offset(pud, ppd->vaddr); pmd = pmd_offset(pud, ppd->vaddr);
if (pmd_none(*pmd)) { if (pmd_none(*pmd)) {
pte = ppd->pgtable_area; pte = ppd->pgtable_area;
- memset(pte, 0, sizeof(pte) * PTRS_PER_PTE);
- ppd->pgtable_area += sizeof(pte) * PTRS_PER_PTE;
+ memset(pte, 0, sizeof(*pte) * PTRS_PER_PTE);
+ ppd->pgtable_area += sizeof(*pte) * PTRS_PER_PTE;
set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte))); set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte)));
} }
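The sme_populate_pgd fix replaces sizeof(pte), the size of the pointer variable, with sizeof(*pte), the size of one table entry. For the native page-table entry type the two sizes appear to coincide on 64-bit builds, so the old code worked only by coincidence; the new form states the intent and survives a change of entry type. A stand-alone illustration with an entry type whose size differs from a pointer's:

#include <stdio.h>
#include <string.h>

struct entry { unsigned long lo, hi; };	/* 16 bytes, unlike an 8-byte pointer */
#define NENTRIES 512

int main(void)
{
	struct entry table[NENTRIES];
	struct entry *p = table;

	printf("sizeof(p)  = %zu (pointer)\n", sizeof(p));
	printf("sizeof(*p) = %zu (one element)\n", sizeof(*p));

	/* sizeof(*p) * NENTRIES clears the whole table; sizeof(p) * NENTRIES
	 * is only right when pointer and element size happen to match. */
	memset(p, 0, sizeof(*p) * NENTRIES);
	return 0;
}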


@ -898,10 +898,7 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err)
val = native_read_msr_safe(msr, err); val = native_read_msr_safe(msr, err);
switch (msr) { switch (msr) {
case MSR_IA32_APICBASE: case MSR_IA32_APICBASE:
- #ifdef CONFIG_X86_X2APIC
- if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31))))
- #endif
- val &= ~X2APIC_ENABLE;
+ val &= ~X2APIC_ENABLE;
break; break;
} }
return val; return val;


@ -361,8 +361,6 @@ void xen_timer_resume(void)
{ {
int cpu; int cpu;
pvclock_resume();
if (xen_clockevent != &xen_vcpuop_clockevent) if (xen_clockevent != &xen_vcpuop_clockevent)
return; return;
@ -379,12 +377,15 @@ static const struct pv_time_ops xen_time_ops __initconst = {
}; };
static struct pvclock_vsyscall_time_info *xen_clock __read_mostly; static struct pvclock_vsyscall_time_info *xen_clock __read_mostly;
static u64 xen_clock_value_saved;
void xen_save_time_memory_area(void) void xen_save_time_memory_area(void)
{ {
struct vcpu_register_time_memory_area t; struct vcpu_register_time_memory_area t;
int ret; int ret;
xen_clock_value_saved = xen_clocksource_read() - xen_sched_clock_offset;
if (!xen_clock) if (!xen_clock)
return; return;
@ -404,7 +405,7 @@ void xen_restore_time_memory_area(void)
int ret; int ret;
if (!xen_clock) if (!xen_clock)
- return;
+ goto out;
t.addr.v = &xen_clock->pvti; t.addr.v = &xen_clock->pvti;
@ -421,6 +422,11 @@ void xen_restore_time_memory_area(void)
if (ret != 0) if (ret != 0)
pr_notice("Cannot restore secondary vcpu_time_info (err %d)", pr_notice("Cannot restore secondary vcpu_time_info (err %d)",
ret); ret);
out:
/* Need pvclock_resume() before using xen_clocksource_read(). */
pvclock_resume();
xen_sched_clock_offset = xen_clocksource_read() - xen_clock_value_saved;
} }
static void xen_setup_vsyscall_time_info(void) static void xen_setup_vsyscall_time_info(void)
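The xen/time hunk records the scheduler-clock value before suspend and, after pvclock_resume(), recomputes the offset so readings continue from the saved value rather than jumping. A compact sketch of that save-and-rebase pattern (raw_clock() is just a host monotonic source here, not the Xen clocksource):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Monotonic source whose zero point may move across suspend/resume. */
static uint64_t raw_clock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static uint64_t clock_offset;	/* subtracted from every reading */
static uint64_t saved_value;	/* last value seen before suspend */

static uint64_t sched_clock(void)
{
	return raw_clock() - clock_offset;
}

static void save_before_suspend(void)
{
	saved_value = sched_clock();
}

static void restore_after_resume(void)
{
	/* Rebase so sched_clock() carries on from the pre-suspend value. */
	clock_offset = raw_clock() - saved_value;
}

int main(void)
{
	save_before_suspend();
	restore_after_resume();
	printf("%llu\n", (unsigned long long)sched_clock());
	return 0;
}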


@ -1154,15 +1154,14 @@ static void bfq_activate_requeue_entity(struct bfq_entity *entity,
} }
/** /**
- * __bfq_deactivate_entity - deactivate an entity from its service tree.
- * @entity: the entity to deactivate.
+ * __bfq_deactivate_entity - update sched_data and service trees for
+ * entity, so as to represent entity as inactive
+ * @entity: the entity being deactivated.
* @ins_into_idle_tree: if false, the entity will not be put into the
* idle tree.
*
- * Deactivates an entity, independently of its previous state. Must
- * be invoked only if entity is on a service tree. Extracts the entity
- * from that tree, and if necessary and allowed, puts it into the idle
- * tree.
+ * If necessary and allowed, puts entity into the idle tree. NOTE:
+ * entity may be on no tree if in service.
*/
bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree) bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
{ {


@ -1,8 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
/* /*
* Copyright (C) 2017 Western Digital Corporation or its affiliates. * Copyright (C) 2017 Western Digital Corporation or its affiliates.
*
* This file is released under the GPL.
*/ */
#include <linux/blkdev.h> #include <linux/blkdev.h>


@ -308,8 +308,9 @@ static const char *const cmd_flag_name[] = {
CMD_FLAG_NAME(PREFLUSH), CMD_FLAG_NAME(PREFLUSH),
CMD_FLAG_NAME(RAHEAD), CMD_FLAG_NAME(RAHEAD),
CMD_FLAG_NAME(BACKGROUND), CMD_FLAG_NAME(BACKGROUND),
CMD_FLAG_NAME(NOUNMAP),
CMD_FLAG_NAME(NOWAIT), CMD_FLAG_NAME(NOWAIT),
CMD_FLAG_NAME(NOUNMAP),
CMD_FLAG_NAME(HIPRI),
}; };
#undef CMD_FLAG_NAME #undef CMD_FLAG_NAME


@ -1906,7 +1906,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
{ {
const int is_sync = op_is_sync(bio->bi_opf); const int is_sync = op_is_sync(bio->bi_opf);
const int is_flush_fua = op_is_flush(bio->bi_opf); const int is_flush_fua = op_is_flush(bio->bi_opf);
- struct blk_mq_alloc_data data = { .flags = 0, .cmd_flags = bio->bi_opf };
+ struct blk_mq_alloc_data data = { .flags = 0};
struct request *rq; struct request *rq;
struct blk_plug *plug; struct blk_plug *plug;
struct request *same_queue_rq = NULL; struct request *same_queue_rq = NULL;
@ -1928,6 +1928,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
rq_qos_throttle(q, bio); rq_qos_throttle(q, bio);
data.cmd_flags = bio->bi_opf;
rq = blk_mq_get_request(q, bio, &data); rq = blk_mq_get_request(q, bio, &data);
if (unlikely(!rq)) { if (unlikely(!rq)) {
rq_qos_cleanup(q, bio); rq_qos_cleanup(q, bio);


@ -597,7 +597,7 @@ static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
rq->wbt_flags |= bio_to_wbt_flags(rwb, bio); rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
} }
- void wbt_issue(struct rq_qos *rqos, struct request *rq)
+ static void wbt_issue(struct rq_qos *rqos, struct request *rq)
{ {
struct rq_wb *rwb = RQWB(rqos); struct rq_wb *rwb = RQWB(rqos);
@ -617,7 +617,7 @@ void wbt_issue(struct rq_qos *rqos, struct request *rq)
} }
} }
- void wbt_requeue(struct rq_qos *rqos, struct request *rq)
+ static void wbt_requeue(struct rq_qos *rqos, struct request *rq)
{ {
struct rq_wb *rwb = RQWB(rqos); struct rq_wb *rwb = RQWB(rqos);
if (!rwb_enabled(rwb)) if (!rwb_enabled(rwb))


@ -539,6 +539,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
ictx = skcipher_instance_ctx(inst); ictx = skcipher_instance_ctx(inst);
/* Stream cipher, e.g. "xchacha12" */ /* Stream cipher, e.g. "xchacha12" */
crypto_set_skcipher_spawn(&ictx->streamcipher_spawn,
skcipher_crypto_instance(inst));
err = crypto_grab_skcipher(&ictx->streamcipher_spawn, streamcipher_name, err = crypto_grab_skcipher(&ictx->streamcipher_spawn, streamcipher_name,
0, crypto_requires_sync(algt->type, 0, crypto_requires_sync(algt->type,
algt->mask)); algt->mask));
@ -547,6 +549,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn); streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn);
/* Block cipher, e.g. "aes" */ /* Block cipher, e.g. "aes" */
crypto_set_spawn(&ictx->blockcipher_spawn,
skcipher_crypto_instance(inst));
err = crypto_grab_spawn(&ictx->blockcipher_spawn, blockcipher_name, err = crypto_grab_spawn(&ictx->blockcipher_spawn, blockcipher_name,
CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK); CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK);
if (err) if (err)


@ -58,14 +58,22 @@ int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
return -EINVAL; return -EINVAL;
if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
return -EINVAL; return -EINVAL;
if (RTA_PAYLOAD(rta) < sizeof(*param))
/*
* RTA_OK() didn't align the rtattr's payload when validating that it
* fits in the buffer. Yet, the keys should start on the next 4-byte
* aligned boundary. To avoid confusion, require that the rtattr
* payload be exactly the param struct, which has a 4-byte aligned size.
*/
if (RTA_PAYLOAD(rta) != sizeof(*param))
return -EINVAL; return -EINVAL;
BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO);
param = RTA_DATA(rta); param = RTA_DATA(rta);
keys->enckeylen = be32_to_cpu(param->enckeylen); keys->enckeylen = be32_to_cpu(param->enckeylen);
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
+ key += rta->rta_len;
+ keylen -= rta->rta_len;
if (keylen < keys->enckeylen) if (keylen < keys->enckeylen)
return -EINVAL; return -EINVAL;
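The authenc hunk tightens the key-blob parsing: the attribute payload must now be exactly the parameter struct (whose size is already 4-byte aligned), so the keys that follow begin at the expected offset and no slack bytes are silently skipped. A rough user-space sketch of the same exact-bounds style of parsing for a length-prefixed key blob (the layout is illustrative, not the kernel's rtattr format):

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>		/* ntohl */

struct key_param {
	uint32_t enckeylen;	/* big-endian on the wire */
};

/* Splits blob into auth key + enc key; returns 0 on success, -1 on bad input. */
static int extract_keys(const uint8_t *blob, size_t len,
			const uint8_t **enckey, size_t *enckeylen,
			const uint8_t **authkey, size_t *authkeylen)
{
	struct key_param param;

	if (len < sizeof(param))	/* header must be fully present */
		return -1;

	memcpy(&param, blob, sizeof(param));
	*enckeylen = ntohl(param.enckeylen);

	blob += sizeof(param);
	len -= sizeof(param);
	if (len < *enckeylen)		/* declared length must fit what remains */
		return -1;

	*authkeylen = len - *enckeylen;
	*authkey = blob;
	*enckey = blob + *authkeylen;	/* enc key sits after the auth key */
	return 0;
}

int main(void)
{
	const uint8_t blob[] = { 0, 0, 0, 2, 'a', 'u', 't', 'h', 'k', '1', 'k', '2' };
	const uint8_t *enc, *auth;
	size_t enclen, authlen;

	return extract_keys(blob, sizeof(blob), &enc, &enclen, &auth, &authlen);
}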


@ -279,7 +279,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
struct aead_request *req = areq->data; struct aead_request *req = areq->data;
err = err ?: crypto_authenc_esn_decrypt_tail(req, 0); err = err ?: crypto_authenc_esn_decrypt_tail(req, 0);
- aead_request_complete(req, err);
+ authenc_esn_request_complete(req, err);
} }
static int crypto_authenc_esn_decrypt(struct aead_request *req) static int crypto_authenc_esn_decrypt(struct aead_request *req)


@ -100,7 +100,7 @@ static void sm3_compress(u32 *w, u32 *wt, u32 *m)
for (i = 0; i <= 63; i++) { for (i = 0; i <= 63; i++) {
- ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i)), 7);
+ ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i & 31)), 7);
ss2 = ss1 ^ rol32(a, 12); ss2 = ss1 ^ rol32(a, 12);
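In sm3_compress() the round counter i runs up to 63 while the rotate width is 32, and shifting a 32-bit value by 32 or more is undefined behaviour in C, so the rotate count is masked with i & 31. A stand-alone rotate helper using the same masking (the usual portable idiom; the kernel's rol32 may differ):

#include <stdint.h>
#include <stdio.h>

/* Rotate left by any count; masking keeps both shifts below the word width. */
static uint32_t rotl32(uint32_t x, unsigned int n)
{
	n &= 31;
	return (x << n) | (x >> ((32 - n) & 31));
}

int main(void)
{
	printf("%#x\n", rotl32(0x80000001u, 33));	/* same result as rotating by 1 */
	return 0;
}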


@ -41,7 +41,8 @@ acpi-y += ec.o
acpi-$(CONFIG_ACPI_DOCK) += dock.o acpi-$(CONFIG_ACPI_DOCK) += dock.o
acpi-$(CONFIG_PCI) += pci_root.o pci_link.o pci_irq.o acpi-$(CONFIG_PCI) += pci_root.o pci_link.o pci_irq.o
obj-$(CONFIG_ACPI_MCFG) += pci_mcfg.o obj-$(CONFIG_ACPI_MCFG) += pci_mcfg.o
- acpi-y += acpi_lpss.o acpi_apd.o
+ acpi-$(CONFIG_PCI) += acpi_lpss.o
+ acpi-y += acpi_apd.o
acpi-y += acpi_platform.o acpi-y += acpi_platform.o
acpi-y += acpi_pnp.o acpi-y += acpi_pnp.o
acpi-$(CONFIG_ARM_AMBA) += acpi_amba.o acpi-$(CONFIG_ARM_AMBA) += acpi_amba.o


@ -1054,18 +1054,6 @@ void __init acpi_early_init(void)
goto error0; goto error0;
} }
/*
* ACPI 2.0 requires the EC driver to be loaded and work before
* the EC device is found in the namespace (i.e. before
* acpi_load_tables() is called).
*
* This is accomplished by looking for the ECDT table, and getting
* the EC parameters out of that.
*
* Ignore the result. Not having an ECDT is not fatal.
*/
status = acpi_ec_ecdt_probe();
#ifdef CONFIG_X86 #ifdef CONFIG_X86
if (!acpi_ioapic) { if (!acpi_ioapic) {
/* compatible (0) means level (3) */ /* compatible (0) means level (3) */
@ -1142,6 +1130,18 @@ static int __init acpi_bus_init(void)
goto error1; goto error1;
} }
/*
* ACPI 2.0 requires the EC driver to be loaded and work before the EC
* device is found in the namespace.
*
* This is accomplished by looking for the ECDT table and getting the EC
* parameters out of that.
*
* Do that before calling acpi_initialize_objects() which may trigger EC
* address space accesses.
*/
acpi_ec_ecdt_probe();
status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE); status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX printk(KERN_ERR PREFIX
