spi-geni-qcom: Add new interfaces and utilise them
Merge series from Vijaya Krishna Nivarthi <quic_vnivarth@quicinc.com>: A "known issue" during implementation of SE DMA for spi geni driver was that it does DMA map/unmap internally instead of in spi framework. Current patches remove this hiccup and also clean up code a bit. Testing revealed no regressions and results with 1000 iterations of reading from EC showed no loss of performance. Results ======= Before - Iteration 999, min=5.10, max=5.17, avg=5.14, ints=25129 After - Iteration 999, min=5.10, max=5.20, avg=5.15, ints=25153
This commit is contained in:
commit
30e15cb0fb
|
@ -5,5 +5,5 @@ Changes
|
|||
See https://wiki.samba.org/index.php/LinuxCIFSKernel for summary
|
||||
information about fixes/improvements to CIFS/SMB2/SMB3 support (changes
|
||||
to cifs.ko module) by kernel version (and cifs internal module version).
|
||||
This may be easier to read than parsing the output of "git log fs/cifs"
|
||||
by release.
|
||||
This may be easier to read than parsing the output of
|
||||
"git log fs/smb/client" by release.
|
||||
|
|
|
@ -45,7 +45,7 @@ Installation instructions
|
|||
|
||||
If you have built the CIFS vfs as module (successfully) simply
|
||||
type ``make modules_install`` (or if you prefer, manually copy the file to
|
||||
the modules directory e.g. /lib/modules/2.4.10-4GB/kernel/fs/cifs/cifs.ko).
|
||||
the modules directory e.g. /lib/modules/6.3.0-060300-generic/kernel/fs/smb/client/cifs.ko).
|
||||
|
||||
If you have built the CIFS vfs into the kernel itself, follow the instructions
|
||||
for your distribution on how to install a new kernel (usually you
|
||||
|
@ -66,15 +66,15 @@ If cifs is built as a module, then the size and number of network buffers
|
|||
and maximum number of simultaneous requests to one server can be configured.
|
||||
Changing these from their defaults is not recommended. By executing modinfo::
|
||||
|
||||
modinfo kernel/fs/cifs/cifs.ko
|
||||
modinfo <path to cifs.ko>
|
||||
|
||||
on kernel/fs/cifs/cifs.ko the list of configuration changes that can be made
|
||||
on kernel/fs/smb/client/cifs.ko the list of configuration changes that can be made
|
||||
at module initialization time (by running insmod cifs.ko) can be seen.
|
||||
|
||||
Recommendations
|
||||
===============
|
||||
|
||||
To improve security the SMB2.1 dialect or later (usually will get SMB3) is now
|
||||
To improve security the SMB2.1 dialect or later (usually will get SMB3.1.1) is now
|
||||
the new default. To use old dialects (e.g. to mount Windows XP) use "vers=1.0"
|
||||
on mount (or vers=2.0 for Windows Vista). Note that the CIFS (vers=1.0) is
|
||||
much older and less secure than the default dialect SMB3 which includes
|
||||
|
|
|
@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
|
|||
title: Lattice Slave SPI sysCONFIG FPGA manager
|
||||
|
||||
maintainers:
|
||||
- Ivan Bornyakov <i.bornyakov@metrotek.ru>
|
||||
- Vladimir Georgiev <v.georgiev@metrotek.ru>
|
||||
|
||||
description: |
|
||||
Lattice sysCONFIG port, which is used for FPGA configuration, among others,
|
||||
|
|
|
@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
|
|||
title: Microchip Polarfire FPGA manager.
|
||||
|
||||
maintainers:
|
||||
- Ivan Bornyakov <i.bornyakov@metrotek.ru>
|
||||
- Vladimir Georgiev <v.georgiev@metrotek.ru>
|
||||
|
||||
description:
|
||||
Device Tree Bindings for Microchip Polarfire FPGA Manager using slave SPI to
|
||||
|
|
|
@ -39,6 +39,12 @@ properties:
|
|||
power-domains:
|
||||
maxItems: 1
|
||||
|
||||
vref-supply:
|
||||
description: |
|
||||
External ADC reference voltage supply on VREFH pad. If VERID[MVI] is
|
||||
set, there are additional, internal reference voltages selectable.
|
||||
VREFH1 is always from VREFH pad.
|
||||
|
||||
"#io-channel-cells":
|
||||
const: 1
|
||||
|
||||
|
@ -72,6 +78,7 @@ examples:
|
|||
assigned-clocks = <&clk IMX_SC_R_ADC_0>;
|
||||
assigned-clock-rates = <24000000>;
|
||||
power-domains = <&pd IMX_SC_R_ADC_0>;
|
||||
vref-supply = <®_1v8>;
|
||||
#io-channel-cells = <1>;
|
||||
};
|
||||
};
|
||||
|
|
|
@ -90,7 +90,7 @@ patternProperties:
|
|||
of the MAX chips to the GyroADC, while MISO line of each Maxim
|
||||
ADC connects to a shared input pin of the GyroADC.
|
||||
enum:
|
||||
- adi,7476
|
||||
- adi,ad7476
|
||||
- fujitsu,mb88101a
|
||||
- maxim,max1162
|
||||
- maxim,max11100
|
||||
|
|
|
@ -166,6 +166,12 @@ properties:
|
|||
resets:
|
||||
maxItems: 1
|
||||
|
||||
mediatek,broken-save-restore-fw:
|
||||
type: boolean
|
||||
description:
|
||||
Asserts that the firmware on this device has issues saving and restoring
|
||||
GICR registers when the GIC redistributors are powered off.
|
||||
|
||||
dependencies:
|
||||
mbi-ranges: [ msi-controller ]
|
||||
msi-controller: [ mbi-ranges ]
|
||||
|
|
|
@ -70,6 +70,7 @@ properties:
|
|||
dsr-gpios: true
|
||||
rng-gpios: true
|
||||
dcd-gpios: true
|
||||
rs485-rts-active-high: true
|
||||
rts-gpio: true
|
||||
power-domains: true
|
||||
clock-frequency: true
|
||||
|
|
|
@ -8,7 +8,7 @@ Required properties:
|
|||
"ti,tlv320aic32x6" TLV320AIC3206, TLV320AIC3256
|
||||
"ti,tas2505" TAS2505, TAS2521
|
||||
- reg: I2C slave address
|
||||
- supply-*: Required supply regulators are:
|
||||
- *-supply: Required supply regulators are:
|
||||
"iov" - digital IO power supply
|
||||
"ldoin" - LDO power supply
|
||||
"dv" - Digital core power supply
|
||||
|
|
|
@ -64,7 +64,7 @@ properties:
|
|||
description:
|
||||
size of memory intended as internal memory for endpoints
|
||||
buffers expressed in KB
|
||||
$ref: /schemas/types.yaml#/definitions/uint32
|
||||
$ref: /schemas/types.yaml#/definitions/uint16
|
||||
|
||||
cdns,phyrst-a-enable:
|
||||
description: Enable resetting of PHY if Rx fail is detected
|
||||
|
|
|
@ -287,7 +287,7 @@ properties:
|
|||
description:
|
||||
High-Speed PHY interface selection between UTMI+ and ULPI when the
|
||||
DWC_USB3_HSPHY_INTERFACE has value 3.
|
||||
$ref: /schemas/types.yaml#/definitions/uint8
|
||||
$ref: /schemas/types.yaml#/definitions/string
|
||||
enum: [utmi, ulpi]
|
||||
|
||||
snps,quirk-frame-length-adjustment:
|
||||
|
|
|
@ -72,7 +72,6 @@ Documentation for filesystem implementations.
|
|||
befs
|
||||
bfs
|
||||
btrfs
|
||||
cifs/index
|
||||
ceph
|
||||
coda
|
||||
configfs
|
||||
|
@ -111,6 +110,7 @@ Documentation for filesystem implementations.
|
|||
ramfs-rootfs-initramfs
|
||||
relay
|
||||
romfs
|
||||
smb/index
|
||||
spufs/index
|
||||
squashfs
|
||||
sysfs
|
||||
|
|
|
@ -59,7 +59,7 @@ the root file system via SMB protocol.
|
|||
Enables the kernel to mount the root file system via SMB that are
|
||||
located in the <server-ip> and <share> specified in this option.
|
||||
|
||||
The default mount options are set in fs/cifs/cifsroot.c.
|
||||
The default mount options are set in fs/smb/client/cifsroot.c.
|
||||
|
||||
server-ip
|
||||
IPv4 address of the server.
|
|
@ -52,3 +52,22 @@ Build kernel with:
|
|||
|
||||
Optionally, build kernel with PAGE_TABLE_CHECK_ENFORCED in order to have page
|
||||
table support without extra kernel parameter.
|
||||
|
||||
Implementation notes
|
||||
====================
|
||||
|
||||
We specifically decided not to use VMA information in order to avoid relying on
|
||||
MM states (except for limited "struct page" info). The page table check is a
|
||||
separate from Linux-MM state machine that verifies that the user accessible
|
||||
pages are not falsely shared.
|
||||
|
||||
PAGE_TABLE_CHECK depends on EXCLUSIVE_SYSTEM_RAM. The reason is that without
|
||||
EXCLUSIVE_SYSTEM_RAM, users are allowed to map arbitrary physical memory
|
||||
regions into the userspace via /dev/mem. At the same time, pages may change
|
||||
their properties (e.g., from anonymous pages to named pages) while they are
|
||||
still being mapped in the userspace, leading to "corruption" detected by the
|
||||
page table check.
|
||||
|
||||
Even with EXCLUSIVE_SYSTEM_RAM, I/O pages may be still allowed to be mapped via
|
||||
/dev/mem. However, these pages are always considered as named pages, so they
|
||||
won't break the logic used in the page table check.
|
||||
|
|
|
@ -60,22 +60,6 @@ attribute-sets:
|
|||
type: nest
|
||||
nested-attributes: bitset-bits
|
||||
|
||||
-
|
||||
name: u64-array
|
||||
attributes:
|
||||
-
|
||||
name: u64
|
||||
type: nest
|
||||
multi-attr: true
|
||||
nested-attributes: u64
|
||||
-
|
||||
name: s32-array
|
||||
attributes:
|
||||
-
|
||||
name: s32
|
||||
type: nest
|
||||
multi-attr: true
|
||||
nested-attributes: s32
|
||||
-
|
||||
name: string
|
||||
attributes:
|
||||
|
@ -705,16 +689,16 @@ attribute-sets:
|
|||
type: u8
|
||||
-
|
||||
name: corrected
|
||||
type: nest
|
||||
nested-attributes: u64-array
|
||||
type: binary
|
||||
sub-type: u64
|
||||
-
|
||||
name: uncorr
|
||||
type: nest
|
||||
nested-attributes: u64-array
|
||||
type: binary
|
||||
sub-type: u64
|
||||
-
|
||||
name: corr-bits
|
||||
type: nest
|
||||
nested-attributes: u64-array
|
||||
type: binary
|
||||
sub-type: u64
|
||||
-
|
||||
name: fec
|
||||
attributes:
|
||||
|
@ -827,8 +811,8 @@ attribute-sets:
|
|||
type: u32
|
||||
-
|
||||
name: index
|
||||
type: nest
|
||||
nested-attributes: s32-array
|
||||
type: binary
|
||||
sub-type: s32
|
||||
-
|
||||
name: module
|
||||
attributes:
|
||||
|
|
|
@ -68,6 +68,9 @@ attribute-sets:
|
|||
type: nest
|
||||
nested-attributes: x509
|
||||
multi-attr: true
|
||||
-
|
||||
name: peername
|
||||
type: string
|
||||
-
|
||||
name: done
|
||||
attributes:
|
||||
|
@ -105,6 +108,7 @@ operations:
|
|||
- auth-mode
|
||||
- peer-identity
|
||||
- certificate
|
||||
- peername
|
||||
-
|
||||
name: done
|
||||
doc: Handler reports handshake completion
|
||||
|
|
|
@ -40,6 +40,7 @@ flow_steering_mode: Device flow steering mode
|
|||
---------------------------------------------
|
||||
The flow steering mode parameter controls the flow steering mode of the driver.
|
||||
Two modes are supported:
|
||||
|
||||
1. 'dmfs' - Device managed flow steering.
|
||||
2. 'smfs' - Software/Driver managed flow steering.
|
||||
|
||||
|
@ -99,6 +100,7 @@ between representors and stacked devices.
|
|||
By default metadata is enabled on the supported devices in E-switch.
|
||||
Metadata is applicable only for E-switch in switchdev mode and
|
||||
users may disable it when NONE of the below use cases will be in use:
|
||||
|
||||
1. HCA is in Dual/multi-port RoCE mode.
|
||||
2. VF/SF representor bonding (Usually used for Live migration)
|
||||
3. Stacked devices
|
||||
|
@ -180,7 +182,8 @@ User commands examples:
|
|||
|
||||
$ devlink health diagnose pci/0000:82:00.0 reporter tx
|
||||
|
||||
NOTE: This command has valid output only when interface is up, otherwise the command has empty output.
|
||||
.. note::
|
||||
This command has valid output only when interface is up, otherwise the command has empty output.
|
||||
|
||||
- Show number of tx errors indicated, number of recover flows ended successfully,
|
||||
is autorecover enabled and graceful period from last recover::
|
||||
|
@ -232,8 +235,9 @@ User commands examples:
|
|||
|
||||
$ devlink health dump show pci/0000:82:00.0 reporter fw
|
||||
|
||||
NOTE: This command can run only on the PF which has fw tracer ownership,
|
||||
running it on other PF or any VF will return "Operation not permitted".
|
||||
.. note::
|
||||
This command can run only on the PF which has fw tracer ownership,
|
||||
running it on other PF or any VF will return "Operation not permitted".
|
||||
|
||||
fw fatal reporter
|
||||
-----------------
|
||||
|
@ -256,7 +260,8 @@ User commands examples:
|
|||
|
||||
$ devlink health dump show pci/0000:82:00.1 reporter fw_fatal
|
||||
|
||||
NOTE: This command can run only on PF.
|
||||
.. note::
|
||||
This command can run only on PF.
|
||||
|
||||
vnic reporter
|
||||
-------------
|
||||
|
@ -265,28 +270,37 @@ It is responsible for querying the vnic diagnostic counters from fw and displayi
|
|||
them in realtime.
|
||||
|
||||
Description of the vnic counters:
|
||||
total_q_under_processor_handle: number of queues in an error state due to
|
||||
an async error or errored command.
|
||||
send_queue_priority_update_flow: number of QP/SQ priority/SL update
|
||||
events.
|
||||
cq_overrun: number of times CQ entered an error state due to an
|
||||
overflow.
|
||||
async_eq_overrun: number of times an EQ mapped to async events was
|
||||
overrun.
|
||||
comp_eq_overrun: number of times an EQ mapped to completion events was
|
||||
overrun.
|
||||
quota_exceeded_command: number of commands issued and failed due to quota
|
||||
exceeded.
|
||||
invalid_command: number of commands issued and failed dues to any reason
|
||||
other than quota exceeded.
|
||||
nic_receive_steering_discard: number of packets that completed RX flow
|
||||
steering but were discarded due to a mismatch in flow table.
|
||||
|
||||
- total_q_under_processor_handle
|
||||
number of queues in an error state due to
|
||||
an async error or errored command.
|
||||
- send_queue_priority_update_flow
|
||||
number of QP/SQ priority/SL update events.
|
||||
- cq_overrun
|
||||
number of times CQ entered an error state due to an overflow.
|
||||
- async_eq_overrun
|
||||
number of times an EQ mapped to async events was overrun.
|
||||
comp_eq_overrun number of times an EQ mapped to completion events was
|
||||
overrun.
|
||||
- quota_exceeded_command
|
||||
number of commands issued and failed due to quota exceeded.
|
||||
- invalid_command
|
||||
number of commands issued and failed dues to any reason other than quota
|
||||
exceeded.
|
||||
- nic_receive_steering_discard
|
||||
number of packets that completed RX flow
|
||||
steering but were discarded due to a mismatch in flow table.
|
||||
|
||||
User commands examples:
|
||||
- Diagnose PF/VF vnic counters
|
||||
|
||||
- Diagnose PF/VF vnic counters::
|
||||
|
||||
$ devlink health diagnose pci/0000:82:00.1 reporter vnic
|
||||
|
||||
- Diagnose representor vnic counters (performed by supplying devlink port of the
|
||||
representor, which can be obtained via devlink port command)
|
||||
representor, which can be obtained via devlink port command)::
|
||||
|
||||
$ devlink health diagnose pci/0000:82:00.1/65537 reporter vnic
|
||||
|
||||
NOTE: This command can run over all interfaces such as PF/VF and representor ports.
|
||||
.. note::
|
||||
This command can run over all interfaces such as PF/VF and representor ports.
|
||||
|
|
|
@ -53,6 +53,7 @@ fills in a structure that contains the parameters of the request:
|
|||
struct socket *ta_sock;
|
||||
tls_done_func_t ta_done;
|
||||
void *ta_data;
|
||||
const char *ta_peername;
|
||||
unsigned int ta_timeout_ms;
|
||||
key_serial_t ta_keyring;
|
||||
key_serial_t ta_my_cert;
|
||||
|
@ -71,6 +72,10 @@ instantiated a struct file in sock->file.
|
|||
has completed. Further explanation of this function is in the "Handshake
|
||||
Completion" sesction below.
|
||||
|
||||
The consumer can provide a NUL-terminated hostname in the @ta_peername
|
||||
field that is sent as part of ClientHello. If no peername is provided,
|
||||
the DNS hostname associated with the server's IP address is used instead.
|
||||
|
||||
The consumer can fill in the @ta_timeout_ms field to force the servicing
|
||||
handshake agent to exit after a number of milliseconds. This enables the
|
||||
socket to be fully closed once both the kernel and the handshake agent
|
||||
|
|
|
@ -127,13 +127,32 @@ the value of ``Message-ID`` to the URL above.
|
|||
Updating patch status
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
It may be tempting to help the maintainers and update the state of your
|
||||
own patches when you post a new version or spot a bug. Please **do not**
|
||||
do that.
|
||||
Interfering with the patch status on patchwork will only cause confusion. Leave
|
||||
it to the maintainer to figure out what is the most recent and current
|
||||
version that should be applied. If there is any doubt, the maintainer
|
||||
will reply and ask what should be done.
|
||||
Contributors and reviewers do not have the permissions to update patch
|
||||
state directly in patchwork. Patchwork doesn't expose much information
|
||||
about the history of the state of patches, therefore having multiple
|
||||
people update the state leads to confusion.
|
||||
|
||||
Instead of delegating patchwork permissions netdev uses a simple mail
|
||||
bot which looks for special commands/lines within the emails sent to
|
||||
the mailing list. For example to mark a series as Changes Requested
|
||||
one needs to send the following line anywhere in the email thread::
|
||||
|
||||
pw-bot: changes-requested
|
||||
|
||||
As a result the bot will set the entire series to Changes Requested.
|
||||
This may be useful when author discovers a bug in their own series
|
||||
and wants to prevent it from getting applied.
|
||||
|
||||
The use of the bot is entirely optional, if in doubt ignore its existence
|
||||
completely. Maintainers will classify and update the state of the patches
|
||||
themselves. No email should ever be sent to the list with the main purpose
|
||||
of communicating with the bot, the bot commands should be seen as metadata.
|
||||
|
||||
The use of the bot is restricted to authors of the patches (the ``From:``
|
||||
header on patch submission and command must match!), maintainers themselves
|
||||
and a handful of senior reviewers. Bot records its activity here:
|
||||
|
||||
https://patchwork.hopto.org/pw-bot.html
|
||||
|
||||
Review timelines
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
|
|
@ -35,7 +35,7 @@ Documentation written by Tom Zanussi
|
|||
in place of an explicit value field - this is simply a count of
|
||||
event hits. If 'values' isn't specified, an implicit 'hitcount'
|
||||
value will be automatically created and used as the only value.
|
||||
Keys can be any field, or the special string 'stacktrace', which
|
||||
Keys can be any field, or the special string 'common_stacktrace', which
|
||||
will use the event's kernel stacktrace as the key. The keywords
|
||||
'keys' or 'key' can be used to specify keys, and the keywords
|
||||
'values', 'vals', or 'val' can be used to specify values. Compound
|
||||
|
@ -54,7 +54,7 @@ Documentation written by Tom Zanussi
|
|||
'compatible' if the fields named in the trigger share the same
|
||||
number and type of fields and those fields also have the same names.
|
||||
Note that any two events always share the compatible 'hitcount' and
|
||||
'stacktrace' fields and can therefore be combined using those
|
||||
'common_stacktrace' fields and can therefore be combined using those
|
||||
fields, however pointless that may be.
|
||||
|
||||
'hist' triggers add a 'hist' file to each event's subdirectory.
|
||||
|
@ -547,9 +547,9 @@ Extended error information
|
|||
the hist trigger display symbolic call_sites, we can have the hist
|
||||
trigger additionally display the complete set of kernel stack traces
|
||||
that led to each call_site. To do that, we simply use the special
|
||||
value 'stacktrace' for the key parameter::
|
||||
value 'common_stacktrace' for the key parameter::
|
||||
|
||||
# echo 'hist:keys=stacktrace:values=bytes_req,bytes_alloc:sort=bytes_alloc' > \
|
||||
# echo 'hist:keys=common_stacktrace:values=bytes_req,bytes_alloc:sort=bytes_alloc' > \
|
||||
/sys/kernel/tracing/events/kmem/kmalloc/trigger
|
||||
|
||||
The above trigger will use the kernel stack trace in effect when an
|
||||
|
@ -561,9 +561,9 @@ Extended error information
|
|||
every callpath to a kmalloc for a kernel compile)::
|
||||
|
||||
# cat /sys/kernel/tracing/events/kmem/kmalloc/hist
|
||||
# trigger info: hist:keys=stacktrace:vals=bytes_req,bytes_alloc:sort=bytes_alloc:size=2048 [active]
|
||||
# trigger info: hist:keys=common_stacktrace:vals=bytes_req,bytes_alloc:sort=bytes_alloc:size=2048 [active]
|
||||
|
||||
{ stacktrace:
|
||||
{ common_stacktrace:
|
||||
__kmalloc_track_caller+0x10b/0x1a0
|
||||
kmemdup+0x20/0x50
|
||||
hidraw_report_event+0x8a/0x120 [hid]
|
||||
|
@ -581,7 +581,7 @@ Extended error information
|
|||
cpu_startup_entry+0x315/0x3e0
|
||||
rest_init+0x7c/0x80
|
||||
} hitcount: 3 bytes_req: 21 bytes_alloc: 24
|
||||
{ stacktrace:
|
||||
{ common_stacktrace:
|
||||
__kmalloc_track_caller+0x10b/0x1a0
|
||||
kmemdup+0x20/0x50
|
||||
hidraw_report_event+0x8a/0x120 [hid]
|
||||
|
@ -596,7 +596,7 @@ Extended error information
|
|||
do_IRQ+0x5a/0xf0
|
||||
ret_from_intr+0x0/0x30
|
||||
} hitcount: 3 bytes_req: 21 bytes_alloc: 24
|
||||
{ stacktrace:
|
||||
{ common_stacktrace:
|
||||
kmem_cache_alloc_trace+0xeb/0x150
|
||||
aa_alloc_task_context+0x27/0x40
|
||||
apparmor_cred_prepare+0x1f/0x50
|
||||
|
@ -608,7 +608,7 @@ Extended error information
|
|||
.
|
||||
.
|
||||
.
|
||||
{ stacktrace:
|
||||
{ common_stacktrace:
|
||||
__kmalloc+0x11b/0x1b0
|
||||
i915_gem_execbuffer2+0x6c/0x2c0 [i915]
|
||||
drm_ioctl+0x349/0x670 [drm]
|
||||
|
@ -616,7 +616,7 @@ Extended error information
|
|||
SyS_ioctl+0x81/0xa0
|
||||
system_call_fastpath+0x12/0x6a
|
||||
} hitcount: 17726 bytes_req: 13944120 bytes_alloc: 19593808
|
||||
{ stacktrace:
|
||||
{ common_stacktrace:
|
||||
__kmalloc+0x11b/0x1b0
|
||||
load_elf_phdrs+0x76/0xa0
|
||||
load_elf_binary+0x102/0x1650
|
||||
|
@ -625,7 +625,7 @@ Extended error information
|
|||
SyS_execve+0x3a/0x50
|
||||
return_from_execve+0x0/0x23
|
||||
} hitcount: 33348 bytes_req: 17152128 bytes_alloc: 20226048
|
||||
{ stacktrace:
|
||||
{ common_stacktrace:
|
||||
kmem_cache_alloc_trace+0xeb/0x150
|
||||
apparmor_file_alloc_security+0x27/0x40
|
||||
security_file_alloc+0x16/0x20
|
||||
|
@ -636,7 +636,7 @@ Extended error information
|
|||
SyS_open+0x1e/0x20
|
||||
system_call_fastpath+0x12/0x6a
|
||||
} hitcount: 4766422 bytes_req: 9532844 bytes_alloc: 38131376
|
||||
{ stacktrace:
|
||||
{ common_stacktrace:
|
||||
__kmalloc+0x11b/0x1b0
|
||||
seq_buf_alloc+0x1b/0x50
|
||||
seq_read+0x2cc/0x370
|
||||
|
@ -1026,7 +1026,7 @@ Extended error information
|
|||
First we set up an initially paused stacktrace trigger on the
|
||||
netif_receive_skb event::
|
||||
|
||||
# echo 'hist:key=stacktrace:vals=len:pause' > \
|
||||
# echo 'hist:key=common_stacktrace:vals=len:pause' > \
|
||||
/sys/kernel/tracing/events/net/netif_receive_skb/trigger
|
||||
|
||||
Next, we set up an 'enable_hist' trigger on the sched_process_exec
|
||||
|
@ -1060,9 +1060,9 @@ Extended error information
|
|||
$ wget https://www.kernel.org/pub/linux/kernel/v3.x/patch-3.19.xz
|
||||
|
||||
# cat /sys/kernel/tracing/events/net/netif_receive_skb/hist
|
||||
# trigger info: hist:keys=stacktrace:vals=len:sort=hitcount:size=2048 [paused]
|
||||
# trigger info: hist:keys=common_stacktrace:vals=len:sort=hitcount:size=2048 [paused]
|
||||
|
||||
{ stacktrace:
|
||||
{ common_stacktrace:
|
||||
__netif_receive_skb_core+0x46d/0x990
|
||||
__netif_receive_skb+0x18/0x60
|
||||
netif_receive_skb_internal+0x23/0x90
|
||||
|
@ -1079,7 +1079,7 @@ Extended error information
|
|||
kthread+0xd2/0xf0
|
||||
ret_from_fork+0x42/0x70
|
||||
} hitcount: 85 len: 28884
|
||||
{ stacktrace:
|
||||
{ common_stacktrace:
|
||||
__netif_receive_skb_core+0x46d/0x990
|
||||
__netif_receive_skb+0x18/0x60
|
||||
netif_receive_skb_internal+0x23/0x90
|
||||
|
@ -1097,7 +1097,7 @@ Extended error information
|
|||
irq_thread+0x11f/0x150
|
||||
kthread+0xd2/0xf0
|
||||
} hitcount: 98 len: 664329
|
||||
{ stacktrace:
|
||||
{ common_stacktrace:
|
||||
__netif_receive_skb_core+0x46d/0x990
|
||||
__netif_receive_skb+0x18/0x60
|
||||
process_backlog+0xa8/0x150
|
||||
|
@ -1115,7 +1115,7 @@ Extended error information
|
|||
inet_sendmsg+0x64/0xa0
|
||||
sock_sendmsg+0x3d/0x50
|
||||
} hitcount: 115 len: 13030
|
||||
{ stacktrace:
|
||||
{ common_stacktrace:
|
||||
__netif_receive_skb_core+0x46d/0x990
|
||||
__netif_receive_skb+0x18/0x60
|
||||
netif_receive_skb_internal+0x23/0x90
|
||||
|
@ -1142,14 +1142,14 @@ Extended error information
|
|||
into the histogram. In order to avoid having to set everything up
|
||||
again, we can just clear the histogram first::
|
||||
|
||||
# echo 'hist:key=stacktrace:vals=len:clear' >> \
|
||||
# echo 'hist:key=common_stacktrace:vals=len:clear' >> \
|
||||
/sys/kernel/tracing/events/net/netif_receive_skb/trigger
|
||||
|
||||
Just to verify that it is in fact cleared, here's what we now see in
|
||||
the hist file::
|
||||
|
||||
# cat /sys/kernel/tracing/events/net/netif_receive_skb/hist
|
||||
# trigger info: hist:keys=stacktrace:vals=len:sort=hitcount:size=2048 [paused]
|
||||
# trigger info: hist:keys=common_stacktrace:vals=len:sort=hitcount:size=2048 [paused]
|
||||
|
||||
Totals:
|
||||
Hits: 0
|
||||
|
@ -1485,12 +1485,12 @@ Extended error information
|
|||
|
||||
And here's an example that shows how to combine histogram data from
|
||||
any two events even if they don't share any 'compatible' fields
|
||||
other than 'hitcount' and 'stacktrace'. These commands create a
|
||||
other than 'hitcount' and 'common_stacktrace'. These commands create a
|
||||
couple of triggers named 'bar' using those fields::
|
||||
|
||||
# echo 'hist:name=bar:key=stacktrace:val=hitcount' > \
|
||||
# echo 'hist:name=bar:key=common_stacktrace:val=hitcount' > \
|
||||
/sys/kernel/tracing/events/sched/sched_process_fork/trigger
|
||||
# echo 'hist:name=bar:key=stacktrace:val=hitcount' > \
|
||||
# echo 'hist:name=bar:key=common_stacktrace:val=hitcount' > \
|
||||
/sys/kernel/tracing/events/net/netif_rx/trigger
|
||||
|
||||
And displaying the output of either shows some interesting if
|
||||
|
@ -1501,16 +1501,16 @@ Extended error information
|
|||
|
||||
# event histogram
|
||||
#
|
||||
# trigger info: hist:name=bar:keys=stacktrace:vals=hitcount:sort=hitcount:size=2048 [active]
|
||||
# trigger info: hist:name=bar:keys=common_stacktrace:vals=hitcount:sort=hitcount:size=2048 [active]
|
||||
#
|
||||
|
||||
{ stacktrace:
|
||||
{ common_stacktrace:
|
||||
kernel_clone+0x18e/0x330
|
||||
kernel_thread+0x29/0x30
|
||||
kthreadd+0x154/0x1b0
|
||||
ret_from_fork+0x3f/0x70
|
||||
} hitcount: 1
|
||||
{ stacktrace:
|
||||
{ common_stacktrace:
|
||||
netif_rx_internal+0xb2/0xd0
|
||||
netif_rx_ni+0x20/0x70
|
||||
dev_loopback_xmit+0xaa/0xd0
|
||||
|
@ -1528,7 +1528,7 @@ Extended error information
|
|||
call_cpuidle+0x3b/0x60
|
||||
cpu_startup_entry+0x22d/0x310
|
||||
} hitcount: 1
|
||||
{ stacktrace:
|
||||
{ common_stacktrace:
|
||||
netif_rx_internal+0xb2/0xd0
|
||||
netif_rx_ni+0x20/0x70
|
||||
dev_loopback_xmit+0xaa/0xd0
|
||||
|
@ -1543,7 +1543,7 @@ Extended error information
|
|||
SyS_sendto+0xe/0x10
|
||||
entry_SYSCALL_64_fastpath+0x12/0x6a
|
||||
} hitcount: 2
|
||||
{ stacktrace:
|
||||
{ common_stacktrace:
|
||||
netif_rx_internal+0xb2/0xd0
|
||||
netif_rx+0x1c/0x60
|
||||
loopback_xmit+0x6c/0xb0
|
||||
|
@ -1561,7 +1561,7 @@ Extended error information
|
|||
sock_sendmsg+0x38/0x50
|
||||
___sys_sendmsg+0x14e/0x270
|
||||
} hitcount: 76
|
||||
{ stacktrace:
|
||||
{ common_stacktrace:
|
||||
netif_rx_internal+0xb2/0xd0
|
||||
netif_rx+0x1c/0x60
|
||||
loopback_xmit+0x6c/0xb0
|
||||
|
@ -1579,7 +1579,7 @@ Extended error information
|
|||
sock_sendmsg+0x38/0x50
|
||||
___sys_sendmsg+0x269/0x270
|
||||
} hitcount: 77
|
||||
{ stacktrace:
|
||||
{ common_stacktrace:
|
||||
netif_rx_internal+0xb2/0xd0
|
||||
netif_rx+0x1c/0x60
|
||||
loopback_xmit+0x6c/0xb0
|
||||
|
@ -1597,7 +1597,7 @@ Extended error information
|
|||
sock_sendmsg+0x38/0x50
|
||||
SYSC_sendto+0xef/0x170
|
||||
} hitcount: 88
|
||||
{ stacktrace:
|
||||
{ common_stacktrace:
|
||||
kernel_clone+0x18e/0x330
|
||||
SyS_clone+0x19/0x20
|
||||
entry_SYSCALL_64_fastpath+0x12/0x6a
|
||||
|
@ -1949,7 +1949,7 @@ uninterruptible state::
|
|||
|
||||
# cd /sys/kernel/tracing
|
||||
# echo 's:block_lat pid_t pid; u64 delta; unsigned long[] stack;' > dynamic_events
|
||||
# echo 'hist:keys=next_pid:ts=common_timestamp.usecs,st=stacktrace if prev_state == 2' >> events/sched/sched_switch/trigger
|
||||
# echo 'hist:keys=next_pid:ts=common_timestamp.usecs,st=common_stacktrace if prev_state == 2' >> events/sched/sched_switch/trigger
|
||||
# echo 'hist:keys=prev_pid:delta=common_timestamp.usecs-$ts,s=$st:onmax($delta).trace(block_lat,prev_pid,$delta,$s)' >> events/sched/sched_switch/trigger
|
||||
# echo 1 > events/synthetic/block_lat/enable
|
||||
# cat trace
|
||||
|
|
|
@ -363,7 +363,7 @@ Code Seq# Include File Comments
|
|||
0xCC 00-0F drivers/misc/ibmvmc.h pseries VMC driver
|
||||
0xCD 01 linux/reiserfs_fs.h
|
||||
0xCE 01-02 uapi/linux/cxl_mem.h Compute Express Link Memory Devices
|
||||
0xCF 02 fs/cifs/ioctl.c
|
||||
0xCF 02 fs/smb/client/cifs_ioctl.h
|
||||
0xDB 00-0F drivers/char/mwave/mwavepub.h
|
||||
0xDD 00-3F ZFCP device driver see drivers/s390/scsi/
|
||||
<mailto:aherrman@de.ibm.com>
|
||||
|
|
44
MAINTAINERS
44
MAINTAINERS
|
@ -956,7 +956,8 @@ F: Documentation/networking/device_drivers/ethernet/amazon/ena.rst
|
|||
F: drivers/net/ethernet/amazon/
|
||||
|
||||
AMAZON RDMA EFA DRIVER
|
||||
M: Gal Pressman <galpress@amazon.com>
|
||||
M: Michael Margolin <mrgolin@amazon.com>
|
||||
R: Gal Pressman <gal.pressman@linux.dev>
|
||||
R: Yossi Leybovich <sleybo@amazon.com>
|
||||
L: linux-rdma@vger.kernel.org
|
||||
S: Supported
|
||||
|
@ -1600,7 +1601,7 @@ F: drivers/media/i2c/ar0521.c
|
|||
|
||||
ARASAN NAND CONTROLLER DRIVER
|
||||
M: Miquel Raynal <miquel.raynal@bootlin.com>
|
||||
M: Naga Sureshkumar Relli <nagasure@xilinx.com>
|
||||
R: Michal Simek <michal.simek@amd.com>
|
||||
L: linux-mtd@lists.infradead.org
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/mtd/arasan,nand-controller.yaml
|
||||
|
@ -1763,7 +1764,7 @@ F: include/linux/amba/mmci.h
|
|||
|
||||
ARM PRIMECELL PL35X NAND CONTROLLER DRIVER
|
||||
M: Miquel Raynal <miquel.raynal@bootlin.com>
|
||||
M: Naga Sureshkumar Relli <nagasure@xilinx.com>
|
||||
R: Michal Simek <michal.simek@amd.com>
|
||||
L: linux-mtd@lists.infradead.org
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/mtd/arm,pl353-nand-r2p1.yaml
|
||||
|
@ -1771,7 +1772,7 @@ F: drivers/mtd/nand/raw/pl35x-nand-controller.c
|
|||
|
||||
ARM PRIMECELL PL35X SMC DRIVER
|
||||
M: Miquel Raynal <miquel.raynal@bootlin.com>
|
||||
M: Naga Sureshkumar Relli <nagasure@xilinx.com>
|
||||
R: Michal Simek <michal.simek@amd.com>
|
||||
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/memory-controllers/arm,pl35x-smc.yaml
|
||||
|
@ -2429,6 +2430,15 @@ X: drivers/net/wireless/atmel/
|
|||
N: at91
|
||||
N: atmel
|
||||
|
||||
ARM/MICROCHIP (ARM64) SoC support
|
||||
M: Conor Dooley <conor@kernel.org>
|
||||
M: Nicolas Ferre <nicolas.ferre@microchip.com>
|
||||
M: Claudiu Beznea <claudiu.beznea@microchip.com>
|
||||
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
|
||||
S: Supported
|
||||
T: git https://git.kernel.org/pub/scm/linux/kernel/git/at91/linux.git
|
||||
F: arch/arm64/boot/dts/microchip/
|
||||
|
||||
ARM/Microchip Sparx5 SoC support
|
||||
M: Lars Povlsen <lars.povlsen@microchip.com>
|
||||
M: Steen Hegelund <Steen.Hegelund@microchip.com>
|
||||
|
@ -2436,8 +2446,7 @@ M: Daniel Machon <daniel.machon@microchip.com>
|
|||
M: UNGLinuxDriver@microchip.com
|
||||
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
|
||||
S: Supported
|
||||
T: git git://github.com/microchip-ung/linux-upstream.git
|
||||
F: arch/arm64/boot/dts/microchip/
|
||||
F: arch/arm64/boot/dts/microchip/sparx*
|
||||
F: drivers/net/ethernet/microchip/vcap/
|
||||
F: drivers/pinctrl/pinctrl-microchip-sgpio.c
|
||||
N: sparx5
|
||||
|
@ -3536,7 +3545,7 @@ F: Documentation/filesystems/befs.rst
|
|||
F: fs/befs/
|
||||
|
||||
BFQ I/O SCHEDULER
|
||||
M: Paolo Valente <paolo.valente@linaro.org>
|
||||
M: Paolo Valente <paolo.valente@unimore.it>
|
||||
M: Jens Axboe <axboe@kernel.dk>
|
||||
L: linux-block@vger.kernel.org
|
||||
S: Maintained
|
||||
|
@ -5130,7 +5139,7 @@ X: drivers/clk/clkdev.c
|
|||
|
||||
COMMON INTERNET FILE SYSTEM CLIENT (CIFS and SMB3)
|
||||
M: Steve French <sfrench@samba.org>
|
||||
R: Paulo Alcantara <pc@cjr.nz> (DFS, global name space)
|
||||
R: Paulo Alcantara <pc@manguebit.com> (DFS, global name space)
|
||||
R: Ronnie Sahlberg <lsahlber@redhat.com> (directory leases, sparse files)
|
||||
R: Shyam Prasad N <sprasad@microsoft.com> (multichannel)
|
||||
R: Tom Talpey <tom@talpey.com> (RDMA, smbdirect)
|
||||
|
@ -5140,8 +5149,8 @@ S: Supported
|
|||
W: https://wiki.samba.org/index.php/LinuxCIFS
|
||||
T: git git://git.samba.org/sfrench/cifs-2.6.git
|
||||
F: Documentation/admin-guide/cifs/
|
||||
F: fs/cifs/
|
||||
F: fs/smbfs_common/
|
||||
F: fs/smb/client/
|
||||
F: fs/smb/common/
|
||||
F: include/uapi/linux/cifs
|
||||
|
||||
COMPACTPCI HOTPLUG CORE
|
||||
|
@ -8153,6 +8162,7 @@ F: include/linux/spi/spi-fsl-dspi.h
|
|||
|
||||
FREESCALE ENETC ETHERNET DRIVERS
|
||||
M: Claudiu Manoil <claudiu.manoil@nxp.com>
|
||||
M: Vladimir Oltean <vladimir.oltean@nxp.com>
|
||||
L: netdev@vger.kernel.org
|
||||
S: Maintained
|
||||
F: drivers/net/ethernet/freescale/enetc/
|
||||
|
@ -9334,7 +9344,7 @@ F: include/linux/hisi_acc_qm.h
|
|||
|
||||
HISILICON ROCE DRIVER
|
||||
M: Haoyue Xu <xuhaoyue1@hisilicon.com>
|
||||
M: Wenpeng Liang <liangwenpeng@huawei.com>
|
||||
M: Junxian Huang <huangjunxian6@hisilicon.com>
|
||||
L: linux-rdma@vger.kernel.org
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt
|
||||
|
@ -10105,7 +10115,7 @@ S: Maintained
|
|||
F: Documentation/process/kernel-docs.rst
|
||||
|
||||
INDUSTRY PACK SUBSYSTEM (IPACK)
|
||||
M: Samuel Iglesias Gonsalvez <siglesias@igalia.com>
|
||||
M: Vaibhav Gupta <vaibhavgupta40@gmail.com>
|
||||
M: Jens Taprogge <jens.taprogge@taprogge.org>
|
||||
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
||||
L: industrypack-devel@lists.sourceforge.net
|
||||
|
@ -11300,9 +11310,9 @@ R: Tom Talpey <tom@talpey.com>
|
|||
L: linux-cifs@vger.kernel.org
|
||||
S: Maintained
|
||||
T: git git://git.samba.org/ksmbd.git
|
||||
F: Documentation/filesystems/cifs/ksmbd.rst
|
||||
F: fs/ksmbd/
|
||||
F: fs/smbfs_common/
|
||||
F: Documentation/filesystems/smb/ksmbd.rst
|
||||
F: fs/smb/common/
|
||||
F: fs/smb/server/
|
||||
|
||||
KERNEL UNIT TESTING FRAMEWORK (KUnit)
|
||||
M: Brendan Higgins <brendanhiggins@google.com>
|
||||
|
@ -13827,7 +13837,7 @@ F: drivers/tty/serial/8250/8250_pci1xxxx.c
|
|||
|
||||
MICROCHIP POLARFIRE FPGA DRIVERS
|
||||
M: Conor Dooley <conor.dooley@microchip.com>
|
||||
R: Ivan Bornyakov <i.bornyakov@metrotek.ru>
|
||||
R: Vladimir Georgiev <v.georgiev@metrotek.ru>
|
||||
L: linux-fpga@vger.kernel.org
|
||||
S: Supported
|
||||
F: Documentation/devicetree/bindings/fpga/microchip,mpf-spi-fpga-mgr.yaml
|
||||
|
@ -14926,6 +14936,7 @@ F: drivers/ntb/hw/intel/
|
|||
|
||||
NTFS FILESYSTEM
|
||||
M: Anton Altaparmakov <anton@tuxera.com>
|
||||
R: Namjae Jeon <linkinjeon@kernel.org>
|
||||
L: linux-ntfs-dev@lists.sourceforge.net
|
||||
S: Supported
|
||||
W: http://www.tuxera.com/
|
||||
|
@ -18703,7 +18714,6 @@ F: include/dt-bindings/clock/samsung,*.h
|
|||
F: include/linux/clk/samsung.h
|
||||
|
||||
SAMSUNG SPI DRIVERS
|
||||
M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
|
||||
M: Andi Shyti <andi.shyti@kernel.org>
|
||||
L: linux-spi@vger.kernel.org
|
||||
L: linux-samsung-soc@vger.kernel.org
|
||||
|
|
2
Makefile
2
Makefile
|
@ -2,7 +2,7 @@
|
|||
VERSION = 6
|
||||
PATCHLEVEL = 4
|
||||
SUBLEVEL = 0
|
||||
EXTRAVERSION = -rc3
|
||||
EXTRAVERSION = -rc5
|
||||
NAME = Hurr durr I'ma ninja sloth
|
||||
|
||||
# *DOCUMENTATION*
|
||||
|
|
|
@ -209,6 +209,7 @@
|
|||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&pinctrl_pcie>;
|
||||
reset-gpio = <&gpio6 7 GPIO_ACTIVE_LOW>;
|
||||
vpcie-supply = <®_pcie>;
|
||||
status = "okay";
|
||||
};
|
||||
|
||||
|
|
|
@ -8,6 +8,7 @@
|
|||
#include <dt-bindings/input/input.h>
|
||||
#include <dt-bindings/leds/common.h>
|
||||
#include <dt-bindings/pwm/pwm.h>
|
||||
#include <dt-bindings/regulator/dlg,da9063-regulator.h>
|
||||
#include "imx6ull.dtsi"
|
||||
|
||||
/ {
|
||||
|
@ -84,16 +85,20 @@
|
|||
|
||||
regulators {
|
||||
vdd_soc_in_1v4: buck1 {
|
||||
regulator-allowed-modes = <DA9063_BUCK_MODE_SLEEP>; /* PFM */
|
||||
regulator-always-on;
|
||||
regulator-boot-on;
|
||||
regulator-initial-mode = <DA9063_BUCK_MODE_SLEEP>;
|
||||
regulator-max-microvolt = <1400000>;
|
||||
regulator-min-microvolt = <1400000>;
|
||||
regulator-name = "vdd_soc_in_1v4";
|
||||
};
|
||||
|
||||
vcc_3v3: buck2 {
|
||||
regulator-allowed-modes = <DA9063_BUCK_MODE_SYNC>; /* PWM */
|
||||
regulator-always-on;
|
||||
regulator-boot-on;
|
||||
regulator-initial-mode = <DA9063_BUCK_MODE_SYNC>;
|
||||
regulator-max-microvolt = <3300000>;
|
||||
regulator-min-microvolt = <3300000>;
|
||||
regulator-name = "vcc_3v3";
|
||||
|
@ -106,8 +111,10 @@
|
|||
* the voltage is set to 1.5V.
|
||||
*/
|
||||
vcc_ddr_1v35: buck3 {
|
||||
regulator-allowed-modes = <DA9063_BUCK_MODE_SYNC>; /* PWM */
|
||||
regulator-always-on;
|
||||
regulator-boot-on;
|
||||
regulator-initial-mode = <DA9063_BUCK_MODE_SYNC>;
|
||||
regulator-max-microvolt = <1500000>;
|
||||
regulator-min-microvolt = <1500000>;
|
||||
regulator-name = "vcc_ddr_1v35";
|
||||
|
|
|
@ -132,6 +132,7 @@
|
|||
reg = <0x2c0f0000 0x1000>;
|
||||
interrupts = <0 84 4>;
|
||||
cache-level = <2>;
|
||||
cache-unified;
|
||||
};
|
||||
|
||||
pmu {
|
||||
|
|
|
@ -59,6 +59,7 @@
|
|||
L2_0: l2-cache0 {
|
||||
compatible = "cache";
|
||||
cache-level = <2>;
|
||||
cache-unified;
|
||||
};
|
||||
};
|
||||
|
||||
|
|
|
@ -72,6 +72,7 @@
|
|||
L2_0: l2-cache0 {
|
||||
compatible = "cache";
|
||||
cache-level = <2>;
|
||||
cache-unified;
|
||||
};
|
||||
};
|
||||
|
||||
|
|
|
@ -58,6 +58,7 @@
|
|||
L2_0: l2-cache0 {
|
||||
compatible = "cache";
|
||||
cache-level = <2>;
|
||||
cache-unified;
|
||||
};
|
||||
};
|
||||
|
||||
|
|
|
@ -171,6 +171,7 @@ conn_subsys: bus@5b000000 {
|
|||
interrupt-names = "host", "peripheral", "otg", "wakeup";
|
||||
phys = <&usb3_phy>;
|
||||
phy-names = "cdns3,usb3-phy";
|
||||
cdns,on-chip-buff-size = /bits/ 16 <18>;
|
||||
status = "disabled";
|
||||
};
|
||||
};
|
||||
|
|
|
@ -98,11 +98,17 @@
|
|||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
ethphy: ethernet-phy@4 {
|
||||
ethphy: ethernet-phy@4 { /* AR8033 or ADIN1300 */
|
||||
compatible = "ethernet-phy-ieee802.3-c22";
|
||||
reg = <4>;
|
||||
reset-gpios = <&gpio1 9 GPIO_ACTIVE_LOW>;
|
||||
reset-assert-us = <10000>;
|
||||
/*
|
||||
* Deassert delay:
|
||||
* ADIN1300 requires 5ms.
|
||||
* AR8033 requires 1ms.
|
||||
*/
|
||||
reset-deassert-us = <20000>;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
|
|
@ -1069,13 +1069,6 @@
|
|||
<&clk IMX8MN_CLK_DISP_APB_ROOT>,
|
||||
<&clk IMX8MN_CLK_DISP_AXI_ROOT>;
|
||||
clock-names = "pix", "axi", "disp_axi";
|
||||
assigned-clocks = <&clk IMX8MN_CLK_DISP_PIXEL_ROOT>,
|
||||
<&clk IMX8MN_CLK_DISP_AXI>,
|
||||
<&clk IMX8MN_CLK_DISP_APB>;
|
||||
assigned-clock-parents = <&clk IMX8MN_CLK_DISP_PIXEL>,
|
||||
<&clk IMX8MN_SYS_PLL2_1000M>,
|
||||
<&clk IMX8MN_SYS_PLL1_800M>;
|
||||
assigned-clock-rates = <594000000>, <500000000>, <200000000>;
|
||||
interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
|
||||
power-domains = <&disp_blk_ctrl IMX8MN_DISPBLK_PD_LCDIF>;
|
||||
status = "disabled";
|
||||
|
@ -1093,12 +1086,6 @@
|
|||
clocks = <&clk IMX8MN_CLK_DSI_CORE>,
|
||||
<&clk IMX8MN_CLK_DSI_PHY_REF>;
|
||||
clock-names = "bus_clk", "sclk_mipi";
|
||||
assigned-clocks = <&clk IMX8MN_CLK_DSI_CORE>,
|
||||
<&clk IMX8MN_CLK_DSI_PHY_REF>;
|
||||
assigned-clock-parents = <&clk IMX8MN_SYS_PLL1_266M>,
|
||||
<&clk IMX8MN_CLK_24M>;
|
||||
assigned-clock-rates = <266000000>, <24000000>;
|
||||
samsung,pll-clock-frequency = <24000000>;
|
||||
interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
|
||||
power-domains = <&disp_blk_ctrl IMX8MN_DISPBLK_PD_MIPI_DSI>;
|
||||
status = "disabled";
|
||||
|
@ -1142,6 +1129,21 @@
|
|||
"lcdif-axi", "lcdif-apb", "lcdif-pix",
|
||||
"dsi-pclk", "dsi-ref",
|
||||
"csi-aclk", "csi-pclk";
|
||||
assigned-clocks = <&clk IMX8MN_CLK_DSI_CORE>,
|
||||
<&clk IMX8MN_CLK_DSI_PHY_REF>,
|
||||
<&clk IMX8MN_CLK_DISP_PIXEL>,
|
||||
<&clk IMX8MN_CLK_DISP_AXI>,
|
||||
<&clk IMX8MN_CLK_DISP_APB>;
|
||||
assigned-clock-parents = <&clk IMX8MN_SYS_PLL1_266M>,
|
||||
<&clk IMX8MN_CLK_24M>,
|
||||
<&clk IMX8MN_VIDEO_PLL1_OUT>,
|
||||
<&clk IMX8MN_SYS_PLL2_1000M>,
|
||||
<&clk IMX8MN_SYS_PLL1_800M>;
|
||||
assigned-clock-rates = <266000000>,
|
||||
<24000000>,
|
||||
<594000000>,
|
||||
<500000000>,
|
||||
<200000000>;
|
||||
#power-domain-cells = <1>;
|
||||
};
|
||||
|
||||
|
|
|
@ -1211,13 +1211,6 @@
|
|||
<&clk IMX8MP_CLK_MEDIA_APB_ROOT>,
|
||||
<&clk IMX8MP_CLK_MEDIA_AXI_ROOT>;
|
||||
clock-names = "pix", "axi", "disp_axi";
|
||||
assigned-clocks = <&clk IMX8MP_CLK_MEDIA_DISP1_PIX_ROOT>,
|
||||
<&clk IMX8MP_CLK_MEDIA_AXI>,
|
||||
<&clk IMX8MP_CLK_MEDIA_APB>;
|
||||
assigned-clock-parents = <&clk IMX8MP_CLK_MEDIA_DISP1_PIX>,
|
||||
<&clk IMX8MP_SYS_PLL2_1000M>,
|
||||
<&clk IMX8MP_SYS_PLL1_800M>;
|
||||
assigned-clock-rates = <594000000>, <500000000>, <200000000>;
|
||||
interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
|
||||
power-domains = <&media_blk_ctrl IMX8MP_MEDIABLK_PD_LCDIF_1>;
|
||||
status = "disabled";
|
||||
|
@ -1237,11 +1230,6 @@
|
|||
<&clk IMX8MP_CLK_MEDIA_APB_ROOT>,
|
||||
<&clk IMX8MP_CLK_MEDIA_AXI_ROOT>;
|
||||
clock-names = "pix", "axi", "disp_axi";
|
||||
assigned-clocks = <&clk IMX8MP_CLK_MEDIA_DISP2_PIX>,
|
||||
<&clk IMX8MP_VIDEO_PLL1>;
|
||||
assigned-clock-parents = <&clk IMX8MP_VIDEO_PLL1_OUT>,
|
||||
<&clk IMX8MP_VIDEO_PLL1_REF_SEL>;
|
||||
assigned-clock-rates = <0>, <1039500000>;
|
||||
power-domains = <&media_blk_ctrl IMX8MP_MEDIABLK_PD_LCDIF_2>;
|
||||
status = "disabled";
|
||||
|
||||
|
@ -1296,11 +1284,16 @@
|
|||
"disp1", "disp2", "isp", "phy";
|
||||
|
||||
assigned-clocks = <&clk IMX8MP_CLK_MEDIA_AXI>,
|
||||
<&clk IMX8MP_CLK_MEDIA_APB>;
|
||||
<&clk IMX8MP_CLK_MEDIA_APB>,
|
||||
<&clk IMX8MP_CLK_MEDIA_DISP1_PIX>,
|
||||
<&clk IMX8MP_CLK_MEDIA_DISP2_PIX>,
|
||||
<&clk IMX8MP_VIDEO_PLL1>;
|
||||
assigned-clock-parents = <&clk IMX8MP_SYS_PLL2_1000M>,
|
||||
<&clk IMX8MP_SYS_PLL1_800M>;
|
||||
assigned-clock-rates = <500000000>, <200000000>;
|
||||
|
||||
<&clk IMX8MP_SYS_PLL1_800M>,
|
||||
<&clk IMX8MP_VIDEO_PLL1_OUT>,
|
||||
<&clk IMX8MP_VIDEO_PLL1_OUT>;
|
||||
assigned-clock-rates = <500000000>, <200000000>,
|
||||
<0>, <0>, <1039500000>;
|
||||
#power-domain-cells = <1>;
|
||||
|
||||
lvds_bridge: bridge@5c {
|
||||
|
|
|
@ -33,6 +33,12 @@
|
|||
};
|
||||
};
|
||||
|
||||
&iomuxc {
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&pinctrl_ext_io0>, <&pinctrl_hog0>, <&pinctrl_hog1>,
|
||||
<&pinctrl_lpspi2_cs2>;
|
||||
};
|
||||
|
||||
/* Colibri SPI */
|
||||
&lpspi2 {
|
||||
status = "okay";
|
||||
|
|
|
@ -48,8 +48,7 @@
|
|||
<IMX8QXP_SAI0_TXFS_LSIO_GPIO0_IO28 0x20>, /* SODIMM 101 */
|
||||
<IMX8QXP_SAI0_RXD_LSIO_GPIO0_IO27 0x20>, /* SODIMM 97 */
|
||||
<IMX8QXP_ENET0_RGMII_RXC_LSIO_GPIO5_IO03 0x06000020>, /* SODIMM 85 */
|
||||
<IMX8QXP_SAI0_TXC_LSIO_GPIO0_IO26 0x20>, /* SODIMM 79 */
|
||||
<IMX8QXP_QSPI0A_DATA1_LSIO_GPIO3_IO10 0x06700041>; /* SODIMM 45 */
|
||||
<IMX8QXP_SAI0_TXC_LSIO_GPIO0_IO26 0x20>; /* SODIMM 79 */
|
||||
};
|
||||
|
||||
pinctrl_uart1_forceoff: uart1forceoffgrp {
|
||||
|
|
|
@ -363,10 +363,6 @@
|
|||
/* TODO VPU Encoder/Decoder */
|
||||
|
||||
&iomuxc {
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&pinctrl_ext_io0>, <&pinctrl_hog0>, <&pinctrl_hog1>,
|
||||
<&pinctrl_hog2>, <&pinctrl_lpspi2_cs2>;
|
||||
|
||||
/* On-module touch pen-down interrupt */
|
||||
pinctrl_ad7879_int: ad7879intgrp {
|
||||
fsl,pins = <IMX8QXP_MIPI_CSI0_I2C0_SCL_LSIO_GPIO3_IO05 0x21>;
|
||||
|
@ -499,8 +495,7 @@
|
|||
};
|
||||
|
||||
pinctrl_hog1: hog1grp {
|
||||
fsl,pins = <IMX8QXP_CSI_MCLK_LSIO_GPIO3_IO01 0x20>, /* SODIMM 75 */
|
||||
<IMX8QXP_QSPI0A_SCLK_LSIO_GPIO3_IO16 0x20>; /* SODIMM 93 */
|
||||
fsl,pins = <IMX8QXP_QSPI0A_SCLK_LSIO_GPIO3_IO16 0x20>; /* SODIMM 93 */
|
||||
};
|
||||
|
||||
pinctrl_hog2: hog2grp {
|
||||
|
@ -774,3 +769,10 @@
|
|||
fsl,pins = <IMX8QXP_SCU_BOOT_MODE3_SCU_DSC_RTC_CLOCK_OUTPUT_32K 0x20>;
|
||||
};
|
||||
};
|
||||
|
||||
/* Delete peripherals which are not present on SOC, but are defined in imx8-ss-*.dtsi */
|
||||
|
||||
/delete-node/ &adc1;
|
||||
/delete-node/ &adc1_lpcg;
|
||||
/delete-node/ &dsp;
|
||||
/delete-node/ &dsp_lpcg;
|
||||
|
|
|
@ -632,9 +632,9 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);
|
|||
*
|
||||
* The walker will walk the page-table entries corresponding to the input
|
||||
* address range specified, visiting entries according to the walker flags.
|
||||
* Invalid entries are treated as leaf entries. Leaf entries are reloaded
|
||||
* after invoking the walker callback, allowing the walker to descend into
|
||||
* a newly installed table.
|
||||
* Invalid entries are treated as leaf entries. The visited page table entry is
|
||||
* reloaded after invoking the walker callback, allowing the walker to descend
|
||||
* into a newly installed table.
|
||||
*
|
||||
* Returning a negative error code from the walker callback function will
|
||||
* terminate the walk immediately with the same error code.
|
||||
|
|
|
@ -115,8 +115,14 @@
|
|||
#define SB_BARRIER_INSN __SYS_BARRIER_INSN(0, 7, 31)
|
||||
|
||||
#define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2)
|
||||
#define SYS_DC_IGSW sys_insn(1, 0, 7, 6, 4)
|
||||
#define SYS_DC_IGDSW sys_insn(1, 0, 7, 6, 6)
|
||||
#define SYS_DC_CSW sys_insn(1, 0, 7, 10, 2)
|
||||
#define SYS_DC_CGSW sys_insn(1, 0, 7, 10, 4)
|
||||
#define SYS_DC_CGDSW sys_insn(1, 0, 7, 10, 6)
|
||||
#define SYS_DC_CISW sys_insn(1, 0, 7, 14, 2)
|
||||
#define SYS_DC_CIGSW sys_insn(1, 0, 7, 14, 4)
|
||||
#define SYS_DC_CIGDSW sys_insn(1, 0, 7, 14, 6)
|
||||
|
||||
/*
|
||||
* Automatically generated definitions for system registers, the
|
||||
|
|
|
@ -412,17 +412,21 @@ static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
|
|||
return false;
|
||||
}
|
||||
|
||||
static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
{
|
||||
if (!__populate_fault_info(vcpu))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
__alias(kvm_hyp_handle_memory_fault);
|
||||
static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
__alias(kvm_hyp_handle_memory_fault);
|
||||
|
||||
static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
{
|
||||
if (!__populate_fault_info(vcpu))
|
||||
if (kvm_hyp_handle_memory_fault(vcpu, exit_code))
|
||||
return true;
|
||||
|
||||
if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
|
||||
|
|
|
@ -575,7 +575,7 @@ struct pkvm_mem_donation {
|
|||
|
||||
struct check_walk_data {
|
||||
enum pkvm_page_state desired;
|
||||
enum pkvm_page_state (*get_page_state)(kvm_pte_t pte);
|
||||
enum pkvm_page_state (*get_page_state)(kvm_pte_t pte, u64 addr);
|
||||
};
|
||||
|
||||
static int __check_page_state_visitor(const struct kvm_pgtable_visit_ctx *ctx,
|
||||
|
@ -583,10 +583,7 @@ static int __check_page_state_visitor(const struct kvm_pgtable_visit_ctx *ctx,
|
|||
{
|
||||
struct check_walk_data *d = ctx->arg;
|
||||
|
||||
if (kvm_pte_valid(ctx->old) && !addr_is_allowed_memory(kvm_pte_to_phys(ctx->old)))
|
||||
return -EINVAL;
|
||||
|
||||
return d->get_page_state(ctx->old) == d->desired ? 0 : -EPERM;
|
||||
return d->get_page_state(ctx->old, ctx->addr) == d->desired ? 0 : -EPERM;
|
||||
}
|
||||
|
||||
static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
|
||||
|
@ -601,8 +598,11 @@ static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
|
|||
return kvm_pgtable_walk(pgt, addr, size, &walker);
|
||||
}
|
||||
|
||||
static enum pkvm_page_state host_get_page_state(kvm_pte_t pte)
|
||||
static enum pkvm_page_state host_get_page_state(kvm_pte_t pte, u64 addr)
|
||||
{
|
||||
if (!addr_is_allowed_memory(addr))
|
||||
return PKVM_NOPAGE;
|
||||
|
||||
if (!kvm_pte_valid(pte) && pte)
|
||||
return PKVM_NOPAGE;
|
||||
|
||||
|
@ -709,7 +709,7 @@ static int host_complete_donation(u64 addr, const struct pkvm_mem_transition *tx
|
|||
return host_stage2_set_owner_locked(addr, size, host_id);
|
||||
}
|
||||
|
||||
static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte)
|
||||
static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte, u64 addr)
|
||||
{
|
||||
if (!kvm_pte_valid(pte))
|
||||
return PKVM_NOPAGE;
|
||||
|
|
|
@ -186,6 +186,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
|
|||
[ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd,
|
||||
[ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low,
|
||||
[ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low,
|
||||
[ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low,
|
||||
[ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth,
|
||||
};
|
||||
|
||||
|
@ -196,6 +197,7 @@ static const exit_handler_fn pvm_exit_handlers[] = {
|
|||
[ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd,
|
||||
[ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low,
|
||||
[ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low,
|
||||
[ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low,
|
||||
[ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth,
|
||||
};
|
||||
|
||||
|
|
|
@ -209,14 +209,26 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
|
|||
.flags = flags,
|
||||
};
|
||||
int ret = 0;
|
||||
bool reload = false;
|
||||
kvm_pteref_t childp;
|
||||
bool table = kvm_pte_table(ctx.old, level);
|
||||
|
||||
if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE))
|
||||
if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE)) {
|
||||
ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_PRE);
|
||||
reload = true;
|
||||
}
|
||||
|
||||
if (!table && (ctx.flags & KVM_PGTABLE_WALK_LEAF)) {
|
||||
ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_LEAF);
|
||||
reload = true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Reload the page table after invoking the walker callback for leaf
|
||||
* entries or after pre-order traversal, to allow the walker to descend
|
||||
* into a newly installed or replaced table.
|
||||
*/
|
||||
if (reload) {
|
||||
ctx.old = READ_ONCE(*ptep);
|
||||
table = kvm_pte_table(ctx.old, level);
|
||||
}
|
||||
|
@ -1320,4 +1332,7 @@ void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pg
|
|||
};
|
||||
|
||||
WARN_ON(__kvm_pgtable_walk(&data, mm_ops, ptep, level + 1));
|
||||
|
||||
WARN_ON(mm_ops->page_count(pgtable) != 1);
|
||||
mm_ops->put_page(pgtable);
|
||||
}
|
||||
|
|
|
@ -110,6 +110,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
|
|||
[ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd,
|
||||
[ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low,
|
||||
[ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low,
|
||||
[ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low,
|
||||
[ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth,
|
||||
};
|
||||
|
||||
|
|
|
@ -694,45 +694,23 @@ out_unlock:
|
|||
|
||||
static struct arm_pmu *kvm_pmu_probe_armpmu(void)
|
||||
{
|
||||
struct perf_event_attr attr = { };
|
||||
struct perf_event *event;
|
||||
struct arm_pmu *pmu = NULL;
|
||||
struct arm_pmu *tmp, *pmu = NULL;
|
||||
struct arm_pmu_entry *entry;
|
||||
int cpu;
|
||||
|
||||
/*
|
||||
* Create a dummy event that only counts user cycles. As we'll never
|
||||
* leave this function with the event being live, it will never
|
||||
* count anything. But it allows us to probe some of the PMU
|
||||
* details. Yes, this is terrible.
|
||||
*/
|
||||
attr.type = PERF_TYPE_RAW;
|
||||
attr.size = sizeof(attr);
|
||||
attr.pinned = 1;
|
||||
attr.disabled = 0;
|
||||
attr.exclude_user = 0;
|
||||
attr.exclude_kernel = 1;
|
||||
attr.exclude_hv = 1;
|
||||
attr.exclude_host = 1;
|
||||
attr.config = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
|
||||
attr.sample_period = GENMASK(63, 0);
|
||||
mutex_lock(&arm_pmus_lock);
|
||||
|
||||
event = perf_event_create_kernel_counter(&attr, -1, current,
|
||||
kvm_pmu_perf_overflow, &attr);
|
||||
cpu = smp_processor_id();
|
||||
list_for_each_entry(entry, &arm_pmus, entry) {
|
||||
tmp = entry->arm_pmu;
|
||||
|
||||
if (IS_ERR(event)) {
|
||||
pr_err_once("kvm: pmu event creation failed %ld\n",
|
||||
PTR_ERR(event));
|
||||
return NULL;
|
||||
if (cpumask_test_cpu(cpu, &tmp->supported_cpus)) {
|
||||
pmu = tmp;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (event->pmu) {
|
||||
pmu = to_arm_pmu(event->pmu);
|
||||
if (pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_NI ||
|
||||
pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
|
||||
pmu = NULL;
|
||||
}
|
||||
|
||||
perf_event_disable(event);
|
||||
perf_event_release_kernel(event);
|
||||
mutex_unlock(&arm_pmus_lock);
|
||||
|
||||
return pmu;
|
||||
}
|
||||
|
@ -912,7 +890,17 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
|
|||
return -EBUSY;
|
||||
|
||||
if (!kvm->arch.arm_pmu) {
|
||||
/* No PMU set, get the default one */
|
||||
/*
|
||||
* No PMU set, get the default one.
|
||||
*
|
||||
	 * The observant among you will notice that the supported_cpus
	 * mask does not get updated for the default PMU even though it
	 * is quite possible the selected instance supports only a
	 * subset of cores in the system. This is intentional, and
	 * upholds the preexisting behavior on heterogeneous systems
	 * where vCPUs can be scheduled on any core but the guest
	 * counters could stop working.
	 */
	kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
	if (!kvm->arch.arm_pmu)
		return -ENODEV;

@@ -211,6 +211,19 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
	return true;
}

static bool access_dcgsw(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (!kvm_has_mte(vcpu->kvm)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	/* Treat MTE S/W ops as we treat the classic ones: with contempt */
	return access_dcsw(vcpu, p, r);
}

static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
{
	switch (r->aarch32_map) {

@@ -1756,8 +1769,14 @@ static bool access_spsr(struct kvm_vcpu *vcpu,
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
	{ SYS_DESC(SYS_DC_IGSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
	{ SYS_DESC(SYS_DC_CGSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CISW), access_dcsw },
	{ SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },

	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),

@@ -235,9 +235,9 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
	 * KVM io device for the redistributor that belongs to this VCPU.
	 */
	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		mutex_lock(&vcpu->kvm->arch.config_lock);
		mutex_lock(&vcpu->kvm->slots_lock);
		ret = vgic_register_redist_iodev(vcpu);
		mutex_unlock(&vcpu->kvm->arch.config_lock);
		mutex_unlock(&vcpu->kvm->slots_lock);
	}
	return ret;
}

@@ -406,7 +406,7 @@ void kvm_vgic_destroy(struct kvm *kvm)

/**
 * vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest
 * is a GICv2. A GICv3 must be explicitly initialized by the guest using the
 * is a GICv2. A GICv3 must be explicitly initialized by userspace using the
 * KVM_DEV_ARM_VGIC_GRP_CTRL KVM_DEVICE group.
 * @kvm: kvm struct pointer
 */

@@ -446,11 +446,13 @@ int vgic_lazy_init(struct kvm *kvm)
int kvm_vgic_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	gpa_t dist_base;
	int ret = 0;

	if (likely(vgic_ready(kvm)))
		return 0;

	mutex_lock(&kvm->slots_lock);
	mutex_lock(&kvm->arch.config_lock);
	if (vgic_ready(kvm))
		goto out;

@@ -463,13 +465,26 @@ int kvm_vgic_map_resources(struct kvm *kvm)
	else
		ret = vgic_v3_map_resources(kvm);

	if (ret)
	if (ret) {
		__kvm_vgic_destroy(kvm);
	else
		dist->ready = true;
		goto out;
	}
	dist->ready = true;
	dist_base = dist->vgic_dist_base;
	mutex_unlock(&kvm->arch.config_lock);

	ret = vgic_register_dist_iodev(kvm, dist_base,
				       kvm_vgic_global_state.type);
	if (ret) {
		kvm_err("Unable to register VGIC dist MMIO regions\n");
		kvm_vgic_destroy(kvm);
	}
	mutex_unlock(&kvm->slots_lock);
	return ret;

out:
	mutex_unlock(&kvm->arch.config_lock);
	mutex_unlock(&kvm->slots_lock);
	return ret;
}
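Editor's note: the hunks above re-order the vGIC locking so that kvm->slots_lock is taken outside kvm->arch.config_lock and the MMIO iodev registration runs with only slots_lock held. A minimal sketch of that nesting follows; it is illustrative only, and the helper name and elided body are not part of the patch.

	/*
	 * Illustrative sketch (not from the patch): slots_lock is the outer
	 * lock, arch.config_lock the inner one, and the sleeping
	 * kvm_io_bus_register_dev() call is made with only slots_lock held.
	 */
	static int register_iodev_sketch(struct kvm *kvm)
	{
		int ret;

		mutex_lock(&kvm->slots_lock);		/* outer lock */
		mutex_lock(&kvm->arch.config_lock);	/* inner lock */

		/* ... derive base addresses from vgic state under config_lock ... */

		mutex_unlock(&kvm->arch.config_lock);

		/* register the MMIO device while holding only slots_lock */
		ret = 0;	/* stands in for kvm_io_bus_register_dev(...) */

		mutex_unlock(&kvm->slots_lock);
		return ret;
	}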
@@ -1936,6 +1936,7 @@ void vgic_lpi_translation_cache_destroy(struct kvm *kvm)

static int vgic_its_create(struct kvm_device *dev, u32 type)
{
	int ret;
	struct vgic_its *its;

	if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)

@@ -1945,9 +1946,12 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
	if (!its)
		return -ENOMEM;

	mutex_lock(&dev->kvm->arch.config_lock);

	if (vgic_initialized(dev->kvm)) {
		int ret = vgic_v4_init(dev->kvm);
		ret = vgic_v4_init(dev->kvm);
		if (ret < 0) {
			mutex_unlock(&dev->kvm->arch.config_lock);
			kfree(its);
			return ret;
		}

@@ -1960,12 +1964,10 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)

	/* Yep, even more trickery for lock ordering... */
#ifdef CONFIG_LOCKDEP
	mutex_lock(&dev->kvm->arch.config_lock);
	mutex_lock(&its->cmd_lock);
	mutex_lock(&its->its_lock);
	mutex_unlock(&its->its_lock);
	mutex_unlock(&its->cmd_lock);
	mutex_unlock(&dev->kvm->arch.config_lock);
#endif

	its->vgic_its_base = VGIC_ADDR_UNDEF;

@@ -1986,7 +1988,11 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)

	dev->private = its;

	return vgic_its_set_abi(its, NR_ITS_ABIS - 1);
	ret = vgic_its_set_abi(its, NR_ITS_ABIS - 1);

	mutex_unlock(&dev->kvm->arch.config_lock);

	return ret;
}

static void vgic_its_destroy(struct kvm_device *kvm_dev)
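Editor's note: the CONFIG_LOCKDEP block in vgic_its_create() above takes and releases the mutexes once in the documented order purely so lockdep records that ordering early; the patch drops config_lock from that sequence because the function now already holds it. A generic sketch of the idiom, with illustrative lock names:

	#ifdef CONFIG_LOCKDEP
	/*
	 * Sketch of the lockdep "priming" idiom: acquire the locks once in the
	 * intended order at init time so any later inversion is reported even
	 * if the locks are never actually contended together.
	 */
	static void prime_lock_order(struct mutex *outer, struct mutex *inner)
	{
		mutex_lock(outer);
		mutex_lock(inner);
		mutex_unlock(inner);
		mutex_unlock(outer);
	}
	#endif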
@@ -102,7 +102,11 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
	if (get_user(addr, uaddr))
		return -EFAULT;

	mutex_lock(&kvm->arch.config_lock);
	/*
	 * Since we can't hold config_lock while registering the redistributor
	 * iodevs, take the slots_lock immediately.
	 */
	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);

@@ -182,6 +186,7 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
	if (r)
		goto out;

	mutex_lock(&kvm->arch.config_lock);
	if (write) {
		r = vgic_check_iorange(kvm, *addr_ptr, addr, alignment, size);
		if (!r)

@@ -189,9 +194,10 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
	} else {
		addr = *addr_ptr;
	}
	mutex_unlock(&kvm->arch.config_lock);

out:
	mutex_unlock(&kvm->arch.config_lock);
	mutex_unlock(&kvm->slots_lock);

	if (!r && !write)
		r = put_user(addr, uaddr);

@@ -769,10 +769,13 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
	struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
	struct vgic_redist_region *rdreg;
	gpa_t rd_base;
	int ret;
	int ret = 0;

	lockdep_assert_held(&kvm->slots_lock);
	mutex_lock(&kvm->arch.config_lock);

	if (!IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr))
		return 0;
		goto out_unlock;

	/*
	 * We may be creating VCPUs before having set the base address for the

@@ -782,10 +785,12 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
	 */
	rdreg = vgic_v3_rdist_free_slot(&vgic->rd_regions);
	if (!rdreg)
		return 0;
		goto out_unlock;

	if (!vgic_v3_check_base(kvm))
		return -EINVAL;
	if (!vgic_v3_check_base(kvm)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	vgic_cpu->rdreg = rdreg;
	vgic_cpu->rdreg_index = rdreg->free_index;

@@ -799,16 +804,20 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
	rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rd_registers);
	rd_dev->redist_vcpu = vcpu;

	mutex_lock(&kvm->slots_lock);
	mutex_unlock(&kvm->arch.config_lock);

	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, rd_base,
				      2 * SZ_64K, &rd_dev->dev);
	mutex_unlock(&kvm->slots_lock);

	if (ret)
		return ret;

	/* Protected by slots_lock */
	rdreg->free_index++;
	return 0;

out_unlock:
	mutex_unlock(&kvm->arch.config_lock);
	return ret;
}

static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)

@@ -834,12 +843,10 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm)
		/* The current c failed, so iterate over the previous ones. */
		int i;

		mutex_lock(&kvm->slots_lock);
		for (i = 0; i < c; i++) {
			vcpu = kvm_get_vcpu(kvm, i);
			vgic_unregister_redist_iodev(vcpu);
		}
		mutex_unlock(&kvm->slots_lock);
	}

	return ret;

@@ -938,7 +945,9 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
{
	int ret;

	mutex_lock(&kvm->arch.config_lock);
	ret = vgic_v3_alloc_redist_region(kvm, index, addr, count);
	mutex_unlock(&kvm->arch.config_lock);
	if (ret)
		return ret;

@@ -950,8 +959,10 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
	if (ret) {
		struct vgic_redist_region *rdreg;

		mutex_lock(&kvm->arch.config_lock);
		rdreg = vgic_v3_rdist_region_from_index(kvm, index);
		vgic_v3_free_redist_region(rdreg);
		mutex_unlock(&kvm->arch.config_lock);
		return ret;
	}

@@ -1096,7 +1096,6 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
			     enum vgic_type type)
{
	struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
	int ret = 0;
	unsigned int len;

	switch (type) {

@@ -1114,10 +1113,6 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
	io_device->iodev_type = IODEV_DIST;
	io_device->redist_vcpu = NULL;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
				      len, &io_device->dev);
	mutex_unlock(&kvm->slots_lock);

	return ret;
	return kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
				       len, &io_device->dev);
}

@@ -312,12 +312,6 @@ int vgic_v2_map_resources(struct kvm *kvm)
		return ret;
	}

	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
	if (ret) {
		kvm_err("Unable to register VGIC MMIO regions\n");
		return ret;
	}

	if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
					    kvm_vgic_global_state.vcpu_base,

@@ -539,7 +539,6 @@ int vgic_v3_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int ret = 0;
	unsigned long c;

	kvm_for_each_vcpu(c, vcpu, kvm) {

@@ -569,12 +568,6 @@ int vgic_v3_map_resources(struct kvm *kvm)
		return -EBUSY;
	}

	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
	if (ret) {
		kvm_err("Unable to register VGICv3 dist MMIO regions\n");
		return ret;
	}

	if (kvm_vgic_global_state.has_gicv4_1)
		vgic_v4_configure_vsgis(kvm);

@@ -184,13 +184,14 @@ static void vgic_v4_disable_vsgis(struct kvm_vcpu *vcpu)
	}
}

/* Must be called with the kvm lock held */
void vgic_v4_configure_vsgis(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	unsigned long i;

	lockdep_assert_held(&kvm->arch.config_lock);

	kvm_arm_halt_guest(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
@@ -858,11 +858,17 @@ static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *
}

static inline void __user *
get_sigframe(struct ksignal *ksig, size_t frame_size)
get_sigframe(struct ksignal *ksig, struct pt_regs *tregs, size_t frame_size)
{
	unsigned long usp = sigsp(rdusp(), ksig);
	unsigned long gap = 0;

	return (void __user *)((usp - frame_size) & -8UL);
	if (CPU_IS_020_OR_030 && tregs->format == 0xb) {
		/* USP is unreliable so use worst-case value */
		gap = 256;
	}

	return (void __user *)((usp - gap - frame_size) & -8UL);
}

static int setup_frame(struct ksignal *ksig, sigset_t *set,

@@ -880,7 +886,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
		return -EFAULT;
	}

	frame = get_sigframe(ksig, sizeof(*frame) + fsize);
	frame = get_sigframe(ksig, tregs, sizeof(*frame) + fsize);

	if (fsize)
		err |= copy_to_user (frame + 1, regs + 1, fsize);

@@ -952,7 +958,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
		return -EFAULT;
	}

	frame = get_sigframe(ksig, sizeof(*frame));
	frame = get_sigframe(ksig, tregs, sizeof(*frame));

	if (fsize)
		err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize);

@@ -79,6 +79,7 @@ config MIPS
	select HAVE_LD_DEAD_CODE_DATA_ELIMINATION
	select HAVE_MOD_ARCH_SPECIFIC
	select HAVE_NMI
	select HAVE_PATA_PLATFORM
	select HAVE_PERF_EVENTS
	select HAVE_PERF_REGS
	select HAVE_PERF_USER_STACK_DUMP

@@ -30,6 +30,7 @@
 *
 */

#include <linux/dma-map-ops.h> /* for dma_default_coherent */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>

@@ -623,17 +624,18 @@ u32 au1xxx_dbdma_put_source(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
		dp->dscr_cmd0 &= ~DSCR_CMD0_IE;

	/*
	 * There is an errata on the Au1200/Au1550 parts that could result
	 * in "stale" data being DMA'ed. It has to do with the snoop logic on
	 * the cache eviction buffer. DMA_NONCOHERENT is on by default for
	 * these parts. If it is fixed in the future, these dma_cache_inv will
	 * just be nothing more than empty macros. See io.h.
	 * There is an erratum on certain Au1200/Au1550 revisions that could
	 * result in "stale" data being DMA'ed. It has to do with the snoop
	 * logic on the cache eviction buffer. dma_default_coherent is set
	 * to false on these parts.
	 */
	dma_cache_wback_inv((unsigned long)buf, nbytes);
	if (!dma_default_coherent)
		dma_cache_wback_inv(KSEG0ADDR(buf), nbytes);
	dp->dscr_cmd0 |= DSCR_CMD0_V;	/* Let it rip */
	wmb(); /* drain writebuffer */
	dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
	ctp->chan_ptr->ddma_dbell = 0;
	wmb(); /* force doorbell write out to dma engine */

	/* Get next descriptor pointer. */
	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));

@@ -685,17 +687,18 @@ u32 au1xxx_dbdma_put_dest(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
		dp->dscr_source1, dp->dscr_dest0, dp->dscr_dest1);
#endif
	/*
	 * There is an errata on the Au1200/Au1550 parts that could result in
	 * "stale" data being DMA'ed. It has to do with the snoop logic on the
	 * cache eviction buffer. DMA_NONCOHERENT is on by default for these
	 * parts. If it is fixed in the future, these dma_cache_inv will just
	 * be nothing more than empty macros. See io.h.
	 * There is an erratum on certain Au1200/Au1550 revisions that could
	 * result in "stale" data being DMA'ed. It has to do with the snoop
	 * logic on the cache eviction buffer. dma_default_coherent is set
	 * to false on these parts.
	 */
	dma_cache_inv((unsigned long)buf, nbytes);
	if (!dma_default_coherent)
		dma_cache_inv(KSEG0ADDR(buf), nbytes);
	dp->dscr_cmd0 |= DSCR_CMD0_V;	/* Let it rip */
	wmb(); /* drain writebuffer */
	dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
	ctp->chan_ptr->ddma_dbell = 0;
	wmb(); /* force doorbell write out to dma engine */

	/* Get next descriptor pointer. */
	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));

@@ -1502,6 +1502,10 @@ static inline void cpu_probe_alchemy(struct cpuinfo_mips *c, unsigned int cpu)
			break;
		}
		break;
	case PRID_IMP_NETLOGIC_AU13XX:
		c->cputype = CPU_ALCHEMY;
		__cpu_name[cpu] = "Au1300";
		break;
	}
}

@@ -1863,6 +1867,7 @@ void cpu_probe(void)
		cpu_probe_mips(c, cpu);
		break;
	case PRID_COMP_ALCHEMY:
	case PRID_COMP_NETLOGIC:
		cpu_probe_alchemy(c, cpu);
		break;
	case PRID_COMP_SIBYTE:

@@ -158,10 +158,6 @@ static unsigned long __init init_initrd(void)
		pr_err("initrd start must be page aligned\n");
		goto disable;
	}
	if (initrd_start < PAGE_OFFSET) {
		pr_err("initrd start < PAGE_OFFSET\n");
		goto disable;
	}

	/*
	 * Sanitize initrd addresses. For example firmware

@@ -174,6 +170,11 @@ static unsigned long __init init_initrd(void)
	initrd_end = (unsigned long)__va(end);
	initrd_start = (unsigned long)__va(__pa(initrd_start));

	if (initrd_start < PAGE_OFFSET) {
		pr_err("initrd start < PAGE_OFFSET\n");
		goto disable;
	}

	ROOT_DEV = Root_RAM0;
	return PFN_UP(end);
disable:
@@ -130,6 +130,10 @@ config PM
config STACKTRACE_SUPPORT
	def_bool y

config LOCKDEP_SUPPORT
	bool
	default y

config ISA_DMA_API
	bool

@@ -1 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
#
config LIGHTWEIGHT_SPINLOCK_CHECK
	bool "Enable lightweight spinlock checks"
	depends on SMP && !DEBUG_SPINLOCK
	default y
	help
	  Add checks with low performance impact to the spinlock functions
	  to catch memory overwrites at runtime. For more advanced
	  spinlock debugging you should choose the DEBUG_SPINLOCK option
	  which will detect unitialized spinlocks too.
	  If unsure say Y here.

@@ -48,6 +48,10 @@ void flush_dcache_page(struct page *page);

#define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&mapping->i_pages)
#define flush_dcache_mmap_lock_irqsave(mapping, flags)		\
		xa_lock_irqsave(&mapping->i_pages, flags)
#define flush_dcache_mmap_unlock_irqrestore(mapping, flags)	\
		xa_unlock_irqrestore(&mapping->i_pages, flags)

#define flush_icache_page(vma,page)	do {		\
	flush_kernel_dcache_page_addr(page_address(page)); \

@@ -7,10 +7,26 @@
#include <asm/processor.h>
#include <asm/spinlock_types.h>

#define SPINLOCK_BREAK_INSN	0x0000c006	/* break 6,6 */

static inline void arch_spin_val_check(int lock_val)
{
	if (IS_ENABLED(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK))
		asm volatile(	"andcm,=	%0,%1,%%r0\n"
				".word %2\n"
		: : "r" (lock_val), "r" (__ARCH_SPIN_LOCK_UNLOCKED_VAL),
			"i" (SPINLOCK_BREAK_INSN));
}

static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);
	return READ_ONCE(*a) == 0;
	volatile unsigned int *a;
	int lock_val;

	a = __ldcw_align(x);
	lock_val = READ_ONCE(*a);
	arch_spin_val_check(lock_val);
	return (lock_val == 0);
}

static inline void arch_spin_lock(arch_spinlock_t *x)

@@ -18,9 +34,18 @@ static inline void arch_spin_lock(arch_spinlock_t *x)
	volatile unsigned int *a;

	a = __ldcw_align(x);
	while (__ldcw(a) == 0)
	do {
		int lock_val_old;

		lock_val_old = __ldcw(a);
		arch_spin_val_check(lock_val_old);
		if (lock_val_old)
			return;	/* got lock */

		/* wait until we should try to get lock again */
		while (*a == 0)
			continue;
	} while (1);
}

static inline void arch_spin_unlock(arch_spinlock_t *x)

@@ -29,15 +54,19 @@ static inline void arch_spin_unlock(arch_spinlock_t *x)

	a = __ldcw_align(x);
	/* Release with ordered store. */
	__asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
	__asm__ __volatile__("stw,ma %0,0(%1)"
		: : "r"(__ARCH_SPIN_LOCK_UNLOCKED_VAL), "r"(a) : "memory");
}

static inline int arch_spin_trylock(arch_spinlock_t *x)
{
	volatile unsigned int *a;
	int lock_val;

	a = __ldcw_align(x);
	return __ldcw(a) != 0;
	lock_val = __ldcw(a);
	arch_spin_val_check(lock_val);
	return lock_val != 0;
}

/*

@@ -2,13 +2,17 @@
#ifndef __ASM_SPINLOCK_TYPES_H
#define __ASM_SPINLOCK_TYPES_H

#define __ARCH_SPIN_LOCK_UNLOCKED_VAL	0x1a46

typedef struct {
#ifdef CONFIG_PA20
	volatile unsigned int slock;
# define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
# define __ARCH_SPIN_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED_VAL }
#else
	volatile unsigned int lock[4];
# define __ARCH_SPIN_LOCK_UNLOCKED	{ { 1, 1, 1, 1 } }
# define __ARCH_SPIN_LOCK_UNLOCKED	\
	{ { __ARCH_SPIN_LOCK_UNLOCKED_VAL, __ARCH_SPIN_LOCK_UNLOCKED_VAL, \
	    __ARCH_SPIN_LOCK_UNLOCKED_VAL, __ARCH_SPIN_LOCK_UNLOCKED_VAL } }
#endif
} arch_spinlock_t;
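Editor's note: with __ARCH_SPIN_LOCK_UNLOCKED_VAL the unlocked state is the magic 0x1a46 rather than 1, so the andcm-based arch_spin_val_check() above traps whenever a lock word contains bits outside that pattern, catching memory overwrites cheaply. A user-space model of the same check, with assert() standing in for the break instruction; it is a sketch, not parisc code:

	#include <assert.h>

	#define UNLOCKED_MAGIC 0x1a46	/* unlocked pattern; locked is 0 */

	/*
	 * Model of the check: any bit set outside the magic pattern means the
	 * lock word was overwritten, since neither "locked" (0) nor
	 * "unlocked" (0x1a46) explains its contents.
	 */
	static inline void spin_val_check(unsigned int lock_val)
	{
		assert((lock_val & ~UNLOCKED_MAGIC) == 0);
	}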
@@ -25,7 +25,7 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
{
	struct alt_instr *entry;
	int index = 0, applied = 0;
	int num_cpus = num_online_cpus();
	int num_cpus = num_present_cpus();
	u16 cond_check;

	cond_check = ALT_COND_ALWAYS |

@@ -399,6 +399,7 @@ void flush_dcache_page(struct page *page)
	unsigned long offset;
	unsigned long addr, old_addr = 0;
	unsigned long count = 0;
	unsigned long flags;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {

@@ -420,7 +421,7 @@ void flush_dcache_page(struct page *page)
	 * to flush one address here for them all to become coherent
	 * on machines that support equivalent aliasing
	 */
	flush_dcache_mmap_lock(mapping);
	flush_dcache_mmap_lock_irqsave(mapping, flags);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;

@@ -460,7 +461,7 @@ void flush_dcache_page(struct page *page)
		}
		WARN_ON(++count == 4096);
	}
	flush_dcache_mmap_unlock(mapping);
	flush_dcache_mmap_unlock_irqrestore(mapping, flags);
}
EXPORT_SYMBOL(flush_dcache_page);

@@ -446,11 +446,27 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	/*
	 * fdc: The data cache line is written back to memory, if and only if
	 * it is dirty, and then invalidated from the data cache.
	 */
	flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
}

void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
	unsigned long addr = (unsigned long) phys_to_virt(paddr);

	switch (dir) {
	case DMA_TO_DEVICE:
	case DMA_BIDIRECTIONAL:
		flush_kernel_dcache_range(addr, size);
		return;
	case DMA_FROM_DEVICE:
		purge_kernel_dcache_range_asm(addr, addr + size);
		return;
	default:
		BUG();
	}
}

@@ -122,13 +122,18 @@ void machine_power_off(void)
	/* It seems we have no way to power the system off via
	 * software. The user has to press the button himself. */

	printk(KERN_EMERG "System shut down completed.\n"
	       "Please power this system off now.");
	printk("Power off or press RETURN to reboot.\n");

	/* prevent soft lockup/stalled CPU messages for endless loop. */
	rcu_sysrq_start();
	lockup_detector_soft_poweroff();
	for (;;);
	while (1) {
		/* reboot if user presses RETURN key */
		if (pdc_iodc_getc() == 13) {
			printk("Rebooting...\n");
			machine_restart(NULL);
		}
	}
}

void (*pm_power_off)(void);

@@ -47,6 +47,10 @@
#include <linux/kgdb.h>
#include <linux/kprobes.h>

#if defined(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK)
#include <asm/spinlock.h>
#endif

#include "../math-emu/math-emu.h"	/* for handle_fpe() */

static void parisc_show_stack(struct task_struct *task,

@@ -291,24 +295,30 @@ static void handle_break(struct pt_regs *regs)
	}

#ifdef CONFIG_KPROBES
	if (unlikely(iir == PARISC_KPROBES_BREAK_INSN)) {
	if (unlikely(iir == PARISC_KPROBES_BREAK_INSN && !user_mode(regs))) {
		parisc_kprobe_break_handler(regs);
		return;
	}
	if (unlikely(iir == PARISC_KPROBES_BREAK_INSN2)) {
	if (unlikely(iir == PARISC_KPROBES_BREAK_INSN2 && !user_mode(regs))) {
		parisc_kprobe_ss_handler(regs);
		return;
	}
#endif

#ifdef CONFIG_KGDB
	if (unlikely(iir == PARISC_KGDB_COMPILED_BREAK_INSN ||
		iir == PARISC_KGDB_BREAK_INSN)) {
	if (unlikely((iir == PARISC_KGDB_COMPILED_BREAK_INSN ||
		iir == PARISC_KGDB_BREAK_INSN)) && !user_mode(regs)) {
		kgdb_handle_exception(9, SIGTRAP, 0, regs);
		return;
	}
#endif

#ifdef CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK
	if ((iir == SPINLOCK_BREAK_INSN) && !user_mode(regs)) {
		die_if_kernel("Spinlock was trashed", regs, 1);
	}
#endif

	if (unlikely(iir != GDB_BREAK_INSN))
		parisc_printk_ratelimited(0, regs,
			KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",

@@ -906,11 +906,17 @@ config DATA_SHIFT

config ARCH_FORCE_MAX_ORDER
	int "Order of maximal physically contiguous allocations"
	range 7 8 if PPC64 && PPC_64K_PAGES
	default "8" if PPC64 && PPC_64K_PAGES
	range 12 12 if PPC64 && !PPC_64K_PAGES
	default "12" if PPC64 && !PPC_64K_PAGES
	range 8 10 if PPC32 && PPC_16K_PAGES
	default "8" if PPC32 && PPC_16K_PAGES
	range 6 10 if PPC32 && PPC_64K_PAGES
	default "6" if PPC32 && PPC_64K_PAGES
	range 4 10 if PPC32 && PPC_256K_PAGES
	default "4" if PPC32 && PPC_256K_PAGES
	range 10 10
	default "10"
	help
	  The kernel page allocator limits the size of maximal physically

@@ -22,15 +22,15 @@ sha1-ppc-spe-y := sha1-spe-asm.o sha1-spe-glue.o
sha256-ppc-spe-y := sha256-spe-asm.o sha256-spe-glue.o
crc32c-vpmsum-y := crc32c-vpmsum_asm.o crc32c-vpmsum_glue.o
crct10dif-vpmsum-y := crct10dif-vpmsum_asm.o crct10dif-vpmsum_glue.o
aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp8-ppc.o aesp8-ppc.o
aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp10-ppc.o aesp10-ppc.o

quiet_cmd_perl = PERL    $@
      cmd_perl = $(PERL) $< $(if $(CONFIG_CPU_LITTLE_ENDIAN), linux-ppc64le, linux-ppc64) > $@

targets += aesp8-ppc.S ghashp8-ppc.S
targets += aesp10-ppc.S ghashp10-ppc.S

$(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE
$(obj)/aesp10-ppc.S $(obj)/ghashp10-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE
	$(call if_changed,perl)

OBJECT_FILES_NON_STANDARD_aesp8-ppc.o := y
OBJECT_FILES_NON_STANDARD_ghashp8-ppc.o := y
OBJECT_FILES_NON_STANDARD_aesp10-ppc.o := y
OBJECT_FILES_NON_STANDARD_ghashp10-ppc.o := y
@@ -30,15 +30,15 @@ MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("aes");

asmlinkage int aes_p8_set_encrypt_key(const u8 *userKey, const int bits,
asmlinkage int aes_p10_set_encrypt_key(const u8 *userKey, const int bits,
				      void *key);
asmlinkage void aes_p8_encrypt(const u8 *in, u8 *out, const void *key);
asmlinkage void aes_p10_encrypt(const u8 *in, u8 *out, const void *key);
asmlinkage void aes_p10_gcm_encrypt(u8 *in, u8 *out, size_t len,
		void *rkey, u8 *iv, void *Xi);
asmlinkage void aes_p10_gcm_decrypt(u8 *in, u8 *out, size_t len,
		void *rkey, u8 *iv, void *Xi);
asmlinkage void gcm_init_htable(unsigned char htable[256], unsigned char Xi[16]);
asmlinkage void gcm_ghash_p8(unsigned char *Xi, unsigned char *Htable,
asmlinkage void gcm_ghash_p10(unsigned char *Xi, unsigned char *Htable,
		unsigned char *aad, unsigned int alen);

struct aes_key {

@@ -93,7 +93,7 @@ static void set_aad(struct gcm_ctx *gctx, struct Hash_ctx *hash,
	gctx->aadLen = alen;
	i = alen & ~0xf;
	if (i) {
		gcm_ghash_p8(nXi, hash->Htable+32, aad, i);
		gcm_ghash_p10(nXi, hash->Htable+32, aad, i);
		aad += i;
		alen -= i;
	}

@@ -102,7 +102,7 @@ static void set_aad(struct gcm_ctx *gctx, struct Hash_ctx *hash,
			nXi[i] ^= aad[i];

		memset(gctx->aad_hash, 0, 16);
		gcm_ghash_p8(gctx->aad_hash, hash->Htable+32, nXi, 16);
		gcm_ghash_p10(gctx->aad_hash, hash->Htable+32, nXi, 16);
	} else {
		memcpy(gctx->aad_hash, nXi, 16);
	}

@@ -115,7 +115,7 @@ static void gcmp10_init(struct gcm_ctx *gctx, u8 *iv, unsigned char *rdkey,
{
	__be32 counter = cpu_to_be32(1);

	aes_p8_encrypt(hash->H, hash->H, rdkey);
	aes_p10_encrypt(hash->H, hash->H, rdkey);
	set_subkey(hash->H);
	gcm_init_htable(hash->Htable+32, hash->H);

@@ -126,7 +126,7 @@ static void gcmp10_init(struct gcm_ctx *gctx, u8 *iv, unsigned char *rdkey,
	/*
	 * Encrypt counter vector as iv tag and increment counter.
	 */
	aes_p8_encrypt(iv, gctx->ivtag, rdkey);
	aes_p10_encrypt(iv, gctx->ivtag, rdkey);

	counter = cpu_to_be32(2);
	*((__be32 *)(iv+12)) = counter;

@@ -160,7 +160,7 @@ static void finish_tag(struct gcm_ctx *gctx, struct Hash_ctx *hash, int len)
	/*
	 * hash (AAD len and len)
	 */
	gcm_ghash_p8(hash->Htable, hash->Htable+32, aclen, 16);
	gcm_ghash_p10(hash->Htable, hash->Htable+32, aclen, 16);

	for (i = 0; i < 16; i++)
		hash->Htable[i] ^= gctx->ivtag[i];

@@ -192,7 +192,7 @@ static int p10_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
	int ret;

	vsx_begin();
	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
	ret = aes_p10_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
	vsx_end();

	return ret ? -EINVAL : 0;

@@ -110,7 +110,7 @@ die "can't locate ppc-xlate.pl";
open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";

$FRAME=8*$SIZE_T;
$prefix="aes_p8";
$prefix="aes_p10";

$sp="r1";
$vrsave="r12";

@@ -64,7 +64,7 @@ $code=<<___;

.text

.globl	.gcm_init_p8
.globl	.gcm_init_p10
	lis		r0,0xfff0
	li		r8,0x10
	mfspr		$vrsave,256

@@ -110,7 +110,7 @@ $code=<<___;
	.long		0
	.byte		0,12,0x14,0,0,0,2,0
	.long		0
.size	.gcm_init_p8,.-.gcm_init_p8
.size	.gcm_init_p10,.-.gcm_init_p10

.globl	.gcm_init_htable
	lis		r0,0xfff0

@@ -237,7 +237,7 @@ $code=<<___;
	.long		0
.size	.gcm_init_htable,.-.gcm_init_htable

.globl	.gcm_gmult_p8
.globl	.gcm_gmult_p10
	lis		r0,0xfff8
	li		r8,0x10
	mfspr		$vrsave,256

@@ -283,9 +283,9 @@ $code=<<___;
	.long		0
	.byte		0,12,0x14,0,0,0,2,0
	.long		0
.size	.gcm_gmult_p8,.-.gcm_gmult_p8
.size	.gcm_gmult_p10,.-.gcm_gmult_p10

.globl	.gcm_ghash_p8
.globl	.gcm_ghash_p10
	lis		r0,0xfff8
	li		r8,0x10
	mfspr		$vrsave,256

@@ -350,7 +350,7 @@ Loop:
	.long		0
	.byte		0,12,0x14,0,0,0,4,0
	.long		0
.size	.gcm_ghash_p8,.-.gcm_ghash_p8
.size	.gcm_ghash_p10,.-.gcm_ghash_p10

.asciz  "GHASH for PowerISA 2.07, CRYPTOGAMS by <appro\@openssl.org>"
.align  2

@@ -317,13 +317,22 @@ static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
	u64 rc;
	long rpages = npages;
	unsigned long limit;

	if (!firmware_has_feature(FW_FEATURE_STUFF_TCE))
		return tce_free_pSeriesLP(tbl->it_index, tcenum,
					  tbl->it_page_shift, npages);

	rc = plpar_tce_stuff((u64)tbl->it_index,
			     (u64)tcenum << tbl->it_page_shift, 0, npages);
	do {
		limit = min_t(unsigned long, rpages, 512);

		rc = plpar_tce_stuff((u64)tbl->it_index,
				     (u64)tcenum << tbl->it_page_shift, 0, limit);

		rpages -= limit;
		tcenum += limit;
	} while (rpages > 0 && !rc);

	if (rc && printk_ratelimit()) {
		printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
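Editor's note: tce_freemulti_pSeriesLP() above now frees TCEs in chunks of at most 512 per H_STUFF_TCE call instead of handing the hypervisor the whole range at once. A stand-alone sketch of that chunking pattern follows; process_batch() and BATCH_LIMIT are hypothetical stand-ins, not kernel APIs.

	/*
	 * Sketch of the batching loop: walk a large range in bounded chunks
	 * so a single call never handles more than BATCH_LIMIT entries and
	 * the loop stops early on the first error.
	 */
	#define BATCH_LIMIT 512

	static long free_range_in_batches(unsigned long start, long count,
					  long (*process_batch)(unsigned long start,
								unsigned long n))
	{
		long rc = 0;

		while (count > 0 && !rc) {
			unsigned long n = count < BATCH_LIMIT ? count : BATCH_LIMIT;

			rc = process_batch(start, n);
			start += n;
			count -= n;
		}
		return rc;
	}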
@@ -88,7 +88,7 @@ static unsigned long ndump = 64;
static unsigned long nidump = 16;
static unsigned long ncsum = 4096;
static int termch;
static char tmpstr[128];
static char tmpstr[KSYM_NAME_LEN];
static int tracing_enabled;

static long bus_error_jmp[JMP_BUF_LEN];

@@ -799,8 +799,11 @@ menu "Power management options"

source "kernel/power/Kconfig"

# Hibernation is only possible on systems where the SBI implementation has
# marked its reserved memory as not accessible from, or does not run
# from the same memory as, Linux
config ARCH_HIBERNATION_POSSIBLE
	def_bool y
	def_bool NONPORTABLE

config ARCH_HIBERNATION_HEADER
	def_bool HIBERNATION

@@ -1,2 +1,6 @@
ifdef CONFIG_RELOCATABLE
KBUILD_CFLAGS += -fno-pie
endif

obj-$(CONFIG_ERRATA_SIFIVE) += sifive/
obj-$(CONFIG_ERRATA_THEAD) += thead/

@@ -36,6 +36,9 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty);

#define __HAVE_ARCH_HUGE_PTEP_GET
pte_t huge_ptep_get(pte_t *ptep);

pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags);
#define arch_make_huge_pte arch_make_huge_pte

@@ -10,4 +10,11 @@

#include <linux/perf_event.h>
#define perf_arch_bpf_user_pt_regs(regs) (struct user_regs_struct *)regs

#define perf_arch_fetch_caller_regs(regs, __ip) { \
	(regs)->epc = (__ip); \
	(regs)->s0 = (unsigned long) __builtin_frame_address(0); \
	(regs)->sp = current_stack_pointer; \
	(regs)->status = SR_PP; \
}
#endif /* _ASM_RISCV_PERF_EVENT_H */

@@ -23,6 +23,10 @@ ifdef CONFIG_FTRACE
CFLAGS_REMOVE_alternative.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_cpufeature.o = $(CC_FLAGS_FTRACE)
endif
ifdef CONFIG_RELOCATABLE
CFLAGS_alternative.o += -fno-pie
CFLAGS_cpufeature.o += -fno-pie
endif
ifdef CONFIG_KASAN
KASAN_SANITIZE_alternative.o := n
KASAN_SANITIZE_cpufeature.o := n

@@ -3,6 +3,30 @@
#include <linux/err.h>

#ifdef CONFIG_RISCV_ISA_SVNAPOT
pte_t huge_ptep_get(pte_t *ptep)
{
	unsigned long pte_num;
	int i;
	pte_t orig_pte = ptep_get(ptep);

	if (!pte_present(orig_pte) || !pte_napot(orig_pte))
		return orig_pte;

	pte_num = napot_pte_num(napot_cont_order(orig_pte));

	for (i = 0; i < pte_num; i++, ptep++) {
		pte_t pte = ptep_get(ptep);

		if (pte_dirty(pte))
			orig_pte = pte_mkdirty(orig_pte);

		if (pte_young(pte))
			orig_pte = pte_mkyoung(orig_pte);
	}

	return orig_pte;
}

pte_t *huge_pte_alloc(struct mm_struct *mm,
		      struct vm_area_struct *vma,
		      unsigned long addr,

@@ -218,6 +242,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
{
	pte_t pte = ptep_get(ptep);
	unsigned long order;
	pte_t orig_pte;
	int i, pte_num;

	if (!pte_napot(pte)) {

@@ -228,9 +253,12 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
	order = napot_cont_order(pte);
	pte_num = napot_pte_num(order);
	ptep = huge_pte_offset(mm, addr, napot_cont_size(order));
	orig_pte = get_clear_contig_flush(mm, addr, ptep, pte_num);

	orig_pte = pte_wrprotect(orig_pte);

	for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++)
		ptep_set_wrprotect(mm, addr, ptep);
	set_pte_at(mm, addr, ptep, orig_pte);
}

pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,

@@ -922,9 +922,9 @@ static void __init create_kernel_page_table(pgd_t *pgdir, bool early)
static void __init create_fdt_early_page_table(uintptr_t fix_fdt_va,
					       uintptr_t dtb_pa)
{
#ifndef CONFIG_BUILTIN_DTB
	uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1);

#ifndef CONFIG_BUILTIN_DTB
	/* Make sure the fdt fixmap address is always aligned on PMD size */
	BUILD_BUG_ON(FIX_FDT % (PMD_SIZE / PAGE_SIZE));
@@ -773,8 +773,6 @@
	.octa 0x3F893781E95FE1576CDA64D2BA0CB204

#ifdef CONFIG_AS_GFNI
.section	.rodata.cst8, "aM", @progbits, 8
.align 8
/* AES affine: */
#define tf_aff_const	BV8(1, 1, 0, 0, 0, 1, 1, 0)
.Ltf_aff_bitmatrix:

@@ -4074,7 +4074,7 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
	if (x86_pmu.intel_cap.pebs_baseline) {
		arr[(*nr)++] = (struct perf_guest_switch_msr){
			.msr = MSR_PEBS_DATA_CFG,
			.host = cpuc->pebs_data_cfg,
			.host = cpuc->active_pebs_data_cfg,
			.guest = kvm_pmu->pebs_data_cfg,
		};
	}

@@ -6150,6 +6150,7 @@ static struct intel_uncore_type spr_uncore_mdf = {
};

#define UNCORE_SPR_NUM_UNCORE_TYPES		12
#define UNCORE_SPR_CHA				0
#define UNCORE_SPR_IIO				1
#define UNCORE_SPR_IMC				6
#define UNCORE_SPR_UPI				8

@@ -6460,12 +6461,22 @@ static int uncore_type_max_boxes(struct intel_uncore_type **types,
	return max + 1;
}

#define SPR_MSR_UNC_CBO_CONFIG		0x2FFE

void spr_uncore_cpu_init(void)
{
	struct intel_uncore_type *type;
	u64 num_cbo;

	uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR,
						UNCORE_SPR_MSR_EXTRA_UNCORES,
						spr_msr_uncores);

	type = uncore_find_type_by_id(uncore_msr_uncores, UNCORE_SPR_CHA);
	if (type) {
		rdmsrl(SPR_MSR_UNC_CBO_CONFIG, num_cbo);
		type->num_boxes = num_cbo;
	}
	spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO);
}

@@ -39,7 +39,7 @@ extern void fpu_flush_thread(void);
static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
	if (cpu_feature_enabled(X86_FEATURE_FPU) &&
	    !(current->flags & (PF_KTHREAD | PF_IO_WORKER))) {
	    !(current->flags & (PF_KTHREAD | PF_USER_WORKER))) {
		save_fpregs_to_fpstate(old_fpu);
		/*
		 * The save operation preserved register state, so the

@@ -79,7 +79,7 @@ int detect_extended_topology_early(struct cpuinfo_x86 *c)
	 * initial apic id, which also represents 32-bit extended x2apic id.
	 */
	c->initial_apicid = edx;
	smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
	smp_num_siblings = max_t(int, smp_num_siblings, LEVEL_MAX_SIBLINGS(ebx));
#endif
	return 0;
}

@@ -109,7 +109,8 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
	 */
	cpuid_count(leaf, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
	c->initial_apicid = edx;
	core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
	core_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
	smp_num_siblings = max_t(int, smp_num_siblings, LEVEL_MAX_SIBLINGS(ebx));
	core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
	die_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
	pkg_mask_width = die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);

@@ -195,7 +195,6 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
	printk("%sCall Trace:\n", log_lvl);

	unwind_start(&state, task, regs, stack);
	stack = stack ? : get_stack_pointer(task, regs);
	regs = unwind_get_entry_regs(&state, &partial);

	/*

@@ -214,9 +213,13 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
	 * - hardirq stack
	 * - entry stack
	 */
	for ( ; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
	for (stack = stack ?: get_stack_pointer(task, regs);
	     stack;
	     stack = stack_info.next_sp) {
		const char *stack_name;

		stack = PTR_ALIGN(stack, sizeof(long));

		if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
			/*
			 * We weren't on a valid stack. It's possible that

@@ -57,7 +57,7 @@ static inline void fpregs_restore_userregs(void)
	struct fpu *fpu = &current->thread.fpu;
	int cpu = smp_processor_id();

	if (WARN_ON_ONCE(current->flags & (PF_KTHREAD | PF_IO_WORKER)))
	if (WARN_ON_ONCE(current->flags & (PF_KTHREAD | PF_USER_WORKER)))
		return;

	if (!fpregs_state_valid(fpu, cpu)) {

@@ -426,7 +426,7 @@ void kernel_fpu_begin_mask(unsigned int kfpu_mask)

	this_cpu_write(in_kernel_fpu, true);

	if (!(current->flags & (PF_KTHREAD | PF_IO_WORKER)) &&
	if (!(current->flags & (PF_KTHREAD | PF_USER_WORKER)) &&
	    !test_thread_flag(TIF_NEED_FPU_LOAD)) {
		set_thread_flag(TIF_NEED_FPU_LOAD);
		save_fpregs_to_fpstate(&current->thread.fpu);

@@ -228,6 +228,23 @@ static int kvm_recalculate_phys_map(struct kvm_apic_map *new,
	u32 xapic_id = kvm_xapic_id(apic);
	u32 physical_id;

	/*
	 * For simplicity, KVM always allocates enough space for all possible
	 * xAPIC IDs. Yell, but don't kill the VM, as KVM can continue on
	 * without the optimized map.
	 */
	if (WARN_ON_ONCE(xapic_id > new->max_apic_id))
		return -EINVAL;

	/*
	 * Bail if a vCPU was added and/or enabled its APIC between allocating
	 * the map and doing the actual calculations for the map. Note, KVM
	 * hardcodes the x2APIC ID to vcpu_id, i.e. there's no TOCTOU bug if
	 * the compiler decides to reload x2apic_id after this check.
	 */
	if (x2apic_id > new->max_apic_id)
		return -E2BIG;

	/*
	 * Deliberately truncate the vCPU ID when detecting a mismatched APIC
	 * ID to avoid false positives if the vCPU ID, i.e. x2APIC ID, is a

@@ -253,8 +270,7 @@ static int kvm_recalculate_phys_map(struct kvm_apic_map *new,
	 */
	if (vcpu->kvm->arch.x2apic_format) {
		/* See also kvm_apic_match_physical_addr(). */
		if ((apic_x2apic_mode(apic) || x2apic_id > 0xff) &&
		    x2apic_id <= new->max_apic_id)
		if (apic_x2apic_mode(apic) || x2apic_id > 0xff)
			new->phys_map[x2apic_id] = apic;

		if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])

@@ -7091,7 +7091,10 @@ static void kvm_recover_nx_huge_pages(struct kvm *kvm)
		 */
		slot = NULL;
		if (atomic_read(&kvm->nr_memslots_dirty_logging)) {
			slot = gfn_to_memslot(kvm, sp->gfn);
			struct kvm_memslots *slots;

			slots = kvm_memslots_for_spte_role(kvm, sp->role);
			slot = __gfn_to_memslot(slots, sp->gfn);
			WARN_ON_ONCE(!slot);
		}

@@ -3510,7 +3510,7 @@ static bool svm_is_vnmi_pending(struct kvm_vcpu *vcpu)
	if (!is_vnmi_enabled(svm))
		return false;

	return !!(svm->vmcb->control.int_ctl & V_NMI_BLOCKING_MASK);
	return !!(svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK);
}

static bool svm_set_vnmi_pending(struct kvm_vcpu *vcpu)

@@ -10758,6 +10758,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
			exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
			break;
		}

		/* Note, VM-Exits that go down the "slow" path are accounted below. */
		++vcpu->stat.exits;
	}

	/*
@@ -7,6 +7,8 @@
 */

#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
#include <asm/asm.h>
#include <asm/export.h>

@@ -29,7 +31,7 @@
 */
SYM_FUNC_START(rep_movs_alternative)
	cmpq $64,%rcx
	jae .Lunrolled
	jae .Llarge

	cmp $8,%ecx
	jae .Lword

@@ -65,6 +67,12 @@ SYM_FUNC_START(rep_movs_alternative)
	_ASM_EXTABLE_UA( 2b, .Lcopy_user_tail)
	_ASM_EXTABLE_UA( 3b, .Lcopy_user_tail)

.Llarge:
0:	ALTERNATIVE "jmp .Lunrolled", "rep movsb", X86_FEATURE_ERMS
1:	RET

	_ASM_EXTABLE_UA( 0b, 1b)

	.p2align 4
.Lunrolled:
10:	movq (%rsi),%r8

@@ -9,6 +9,7 @@
#include <linux/sched/task.h>

#include <asm/set_memory.h>
#include <asm/cpu_device_id.h>
#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/page.h>

@@ -261,6 +262,24 @@ static void __init probe_page_size_mask(void)
	}
}

#define INTEL_MATCH(_model) { .vendor  = X86_VENDOR_INTEL,	\
			      .family  = 6,			\
			      .model = _model,			\
			    }
/*
 * INVLPG may not properly flush Global entries
 * on these CPUs when PCIDs are enabled.
 */
static const struct x86_cpu_id invlpg_miss_ids[] = {
	INTEL_MATCH(INTEL_FAM6_ALDERLAKE   ),
	INTEL_MATCH(INTEL_FAM6_ALDERLAKE_L ),
	INTEL_MATCH(INTEL_FAM6_ALDERLAKE_N ),
	INTEL_MATCH(INTEL_FAM6_RAPTORLAKE  ),
	INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_P),
	INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_S),
	{}
};

static void setup_pcid(void)
{
	if (!IS_ENABLED(CONFIG_X86_64))

@@ -269,6 +288,12 @@ static void setup_pcid(void)
	if (!boot_cpu_has(X86_FEATURE_PCID))
		return;

	if (x86_match_cpu(invlpg_miss_ids)) {
		pr_info("Incomplete global flushes, disabling PCID");
		setup_clear_cpu_cap(X86_FEATURE_PCID);
		return;
	}

	if (boot_cpu_has(X86_FEATURE_PGE)) {
		/*
		 * This can't be cr4_set_bits_and_update_boot() -- the

@@ -198,7 +198,7 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
		i++;
	}
	kfree(v);
	return 0;
	return msi_device_populate_sysfs(&dev->dev);

error:
	if (ret == -ENOSYS)

@@ -254,7 +254,7 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
		dev_dbg(&dev->dev,
			"xen: msi --> pirq=%d --> irq=%d\n", pirq, irq);
	}
	return 0;
	return msi_device_populate_sysfs(&dev->dev);

error:
	dev_err(&dev->dev, "Failed to create MSI%s! ret=%d!\n",

@@ -346,7 +346,7 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
		if (ret < 0)
			goto out;
	}
	ret = 0;
	ret = msi_device_populate_sysfs(&dev->dev);
out:
	return ret;
}

@@ -394,6 +394,8 @@ static void xen_teardown_msi_irqs(struct pci_dev *dev)
			xen_destroy_irq(msidesc->irq + i);
		msidesc->irq = 0;
	}

	msi_device_destroy_sysfs(&dev->dev);
}

static void xen_pv_teardown_msi_irqs(struct pci_dev *dev)

@@ -343,7 +343,19 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
	struct rt_sigframe *frame;
	int err = 0, sig = ksig->sig;
	unsigned long sp, ra, tp, ps;
	unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler;
	unsigned long handler_fdpic_GOT = 0;
	unsigned int base;
	bool fdpic = IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) &&
		(current->personality & FDPIC_FUNCPTRS);

	if (fdpic) {
		unsigned long __user *fdpic_func_desc =
			(unsigned long __user *)handler;
		if (__get_user(handler, &fdpic_func_desc[0]) ||
		    __get_user(handler_fdpic_GOT, &fdpic_func_desc[1]))
			return -EFAULT;
	}

	sp = regs->areg[1];

@@ -373,20 +385,26 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

	if (ksig->ka.sa.sa_flags & SA_RESTORER) {
		ra = (unsigned long)ksig->ka.sa.sa_restorer;
		if (fdpic) {
			unsigned long __user *fdpic_func_desc =
				(unsigned long __user *)ksig->ka.sa.sa_restorer;

			err |= __get_user(ra, fdpic_func_desc);
		} else {
			ra = (unsigned long)ksig->ka.sa.sa_restorer;
		}
	} else {

		/* Create sys_rt_sigreturn syscall in stack frame */

		err |= gen_return_code(frame->retcode);

		if (err) {
			return -EFAULT;
		}
		ra = (unsigned long) frame->retcode;
	}

	/*
	if (err)
		return -EFAULT;

	/*
	 * Create signal handler execution context.
	 * Return context not modified until this point.
	 */

@@ -394,8 +412,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
	/* Set up registers for signal handler; preserve the threadptr */
	tp = regs->threadptr;
	ps = regs->ps;
	start_thread(regs, (unsigned long) ksig->ka.sa.sa_handler,
		     (unsigned long) frame);
	start_thread(regs, handler, (unsigned long)frame);

	/* Set up a stack frame for a call4 if userspace uses windowed ABI */
	if (ps & PS_WOE_MASK) {

@@ -413,6 +430,8 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
		regs->areg[base + 4] = (unsigned long) &frame->uc;
		regs->threadptr = tp;
		regs->ps = ps;
		if (fdpic)
			regs->areg[base + 11] = handler_fdpic_GOT;

	pr_debug("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08lx\n",
		current->comm, current->pid, sig, frame, regs->pc);