dmaengine updates for v6.4
commit 7994beabfb
Merge tag 'dmaengine-6.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine updates from Vinod Koul:
 "New support:
   - Apple admac t8112 device support
   - StarFive JH7110 DMA controller

  Updates:
   - Big pile of idxd updates to support IAA 2.0 device capabilities,
     DSA 2.0 Event Log and completion record faulting features, and new
     DSA operations
   - at_xdmac suspend & resume updates and driver code cleanup
   - k3-udma suspend & resume support
   - k3-psil thread support for J784s4"

* tag 'dmaengine-6.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (57 commits)
  dmaengine: idxd: add per wq PRS disable
  dmaengine: idxd: add pid to exported sysfs attribute for opened file
  dmaengine: idxd: expose fault counters to sysfs
  dmaengine: idxd: add a device to represent the file opened
  dmaengine: idxd: add per file user counters for completion record faults
  dmaengine: idxd: process batch descriptor completion record faults
  dmaengine: idxd: add descs_completed field for completion record
  dmaengine: idxd: process user page faults for completion record
  dmaengine: idxd: add idxd_copy_cr() to copy user completion record during page fault handling
  dmaengine: idxd: create kmem cache for event log fault items
  dmaengine: idxd: add per DSA wq workqueue for processing cr faults
  dmanegine: idxd: add debugfs for event log dump
  dmaengine: idxd: add interrupt handling for event log
  dmaengine: idxd: setup event log configuration
  dmaengine: idxd: add event log size sysfs attribute
  dmaengine: idxd: make misc interrupt one shot
  dt-bindings: dma: snps,dw-axi-dmac: constrain the items of resets for JH7110 dma
  dt-bindings: dma: Drop unneeded quotes
  dmaengine: at_xdmac: align declaration of ret with the rest of variables
  dmaengine: at_xdmac: add a warning message regarding for unpaused channels
  ...
@@ -136,6 +136,22 @@ Description:	The last executed device administrative command's status/error.
 		Also last configuration error overloaded.
 		Writing to it will clear the status.
 
+What:		/sys/bus/dsa/devices/dsa<m>/iaa_cap
+Date:		Sept 14, 2022
+KernelVersion:	6.0.0
+Contact:	dmaengine@vger.kernel.org
+Description:	IAA (IAX) capability mask. Exported to user space for application
+		consumption. This attribute should only be visible on IAA devices
+		that are version 2 or later.
+
+What:		/sys/bus/dsa/devices/dsa<m>/event_log_size
+Date:		Sept 14, 2022
+KernelVersion:	6.4.0
+Contact:	dmaengine@vger.kernel.org
+Description:	The event log size to be configured. Default is 64 entries and
+		occupies 4k size if the evl entry is 64 bytes. It's visible
+		only on platforms that support the capability.
+
 What:		/sys/bus/dsa/devices/wq<m>.<n>/block_on_fault
 Date:		Oct 27, 2020
 KernelVersion:	5.11.0

@@ -219,6 +235,16 @@ Contact:	dmaengine@vger.kernel.org
 Description:	Indicate whether ATS disable is turned on for the workqueue.
 		0 indicates ATS is on, and 1 indicates ATS is off for the workqueue.
 
+What:		/sys/bus/dsa/devices/wq<m>.<n>/prs_disable
+Date:		Sept 14, 2022
+KernelVersion:	6.4.0
+Contact:	dmaengine@vger.kernel.org
+Description:	Controls whether PRS disable is turned on for the workqueue.
+		0 indicates PRS is on, and 1 indicates PRS is off for the
+		workqueue. This option overrides the block_on_fault attribute
+		if set. It's visible only on platforms that support the
+		capability.
+
 What:		/sys/bus/dsa/devices/wq<m>.<n>/occupancy
 Date:		May 25, 2021
 KernelVersion:	5.14.0

@@ -302,3 +328,28 @@ Description:	Allows control of the number of batch descriptors that can be
 		1 (1/2 of max value), 2 (1/4 of the max value), and 3 (1/8 of
 		the max value). It's visible only on platforms that support
 		the capability.
+
+What:		/sys/bus/dsa/devices/wq<m>.<n>/dsa<x>\!wq<m>.<n>/file<y>/cr_faults
+Date:		Sept 14, 2022
+KernelVersion:	6.4.0
+Contact:	dmaengine@vger.kernel.org
+Description:	Show the number of Completion Record (CR) faults this application
+		has caused.
+
+What:		/sys/bus/dsa/devices/wq<m>.<n>/dsa<x>\!wq<m>.<n>/file<y>/cr_fault_failures
+Date:		Sept 14, 2022
+KernelVersion:	6.4.0
+Contact:	dmaengine@vger.kernel.org
+Description:	Show the number of Completion Record (CR) fault failures that this
+		application has caused. The failure counter is incremented when the
+		driver cannot fault in the address for the CR. Typically this is caused
+		by a bad address programmed in the submitted descriptor, or by a
+		malicious submitter using a bad CR address on purpose.
+
+What:		/sys/bus/dsa/devices/wq<m>.<n>/dsa<x>\!wq<m>.<n>/file<y>/pid
+Date:		Sept 14, 2022
+KernelVersion:	6.4.0
+Contact:	dmaengine@vger.kernel.org
+Description:	Show the process id of the application that opened the file. This is
+		helpful information for a monitor daemon that wants to kill the
+		application that opened the file.
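The per-file counters and pid above are plain integer reads from sysfs. A minimal user-space sketch in C (the dsa/wq/file indices in the path are hypothetical; a real monitor daemon would discover them by walking /sys/bus/dsa/devices):

```c
/* Sketch: read one idxd per-open-file counter from sysfs.
 * The base path below is illustrative only.
 */
#include <stdio.h>

static long read_counter(const char *path)
{
	FILE *f = fopen(path, "r");
	long val = -1;

	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	const char *base = "/sys/bus/dsa/devices/wq0.0/dsa0!wq0.0/file0";
	char path[256];

	snprintf(path, sizeof(path), "%s/cr_faults", base);
	printf("cr_faults: %ld\n", read_counter(path));
	snprintf(path, sizeof(path), "%s/pid", base);
	printf("pid: %ld\n", read_counter(path));
	return 0;
}
```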
@@ -26,6 +26,7 @@ properties:
       - enum:
           - apple,t6000-admac
           - apple,t8103-admac
+          - apple,t8112-admac
       - const: apple,admac
 
   reg:
@@ -24,6 +24,7 @@ properties:
           - qcom,sm6350-gpi-dma
       - items:
           - enum:
               - qcom,qcm2290-gpi-dma
               - qcom,qdu1000-gpi-dma
               - qcom,sc7280-gpi-dma
+              - qcom,sm6115-gpi-dma
@@ -54,6 +54,11 @@ properties:
       - description: DMA main clock
       - description: DMA register access clock
 
+  clock-names:
+    items:
+      - const: main
+      - const: register
+
   '#dma-cells':
     const: 1
     description:

@@ -77,16 +82,23 @@ properties:
       - description: Reset for DMA ARESETN reset terminal
       - description: Reset for DMA RST_ASYNC reset terminal
 
+  reset-names:
+    items:
+      - const: arst
+      - const: rst_async
+
 required:
   - compatible
   - reg
   - interrupts
   - interrupt-names
   - clocks
+  - clock-names
   - '#dma-cells'
   - dma-channels
   - power-domains
   - resets
+  - reset-names
 
 additionalProperties: false
 
@@ -124,9 +136,11 @@ examples:
                           "ch12", "ch13", "ch14", "ch15";
         clocks = <&cpg CPG_MOD R9A07G044_DMAC_ACLK>,
                  <&cpg CPG_MOD R9A07G044_DMAC_PCLK>;
+        clock-names = "main", "register";
         power-domains = <&cpg>;
         resets = <&cpg R9A07G044_DMAC_ARESETN>,
                  <&cpg R9A07G044_DMAC_RST_ASYNC>;
+        reset-names = "arst", "rst_async";
         #dma-cells = <1>;
         dma-channels = <16>;
     };
@@ -20,6 +20,7 @@ properties:
     enum:
       - snps,axi-dma-1.01a
       - intel,kmb-axi-dma
+      - starfive,jh7110-axi-dma
 
   reg:
     minItems: 1

@@ -58,7 +59,8 @@ properties:
     maximum: 8
 
   resets:
-    maxItems: 1
+    minItems: 1
+    maxItems: 2
 
   snps,dma-masters:
     description: |

@@ -109,6 +111,25 @@ required:
   - snps,priority
   - snps,block-size
 
+if:
+  properties:
+    compatible:
+      contains:
+        enum:
+          - starfive,jh7110-axi-dma
+then:
+  properties:
+    resets:
+      minItems: 2
+      items:
+        - description: AXI reset line
+        - description: AHB reset line
+        - description: module reset
+else:
+  properties:
+    resets:
+      maxItems: 1
+
 additionalProperties: false
 
 examples:
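With the conditional above, a JH7110 node must supply two resets while other users stay at one. A hedged DTS sketch of a StarFive node (unit address, phandles, and specifier values are placeholders, not taken from the real JH7110 device tree; the clock names follow the driver's devm_clk_get() calls shown later in this series):

```
dma-controller@16008000 {
    compatible = "starfive,jh7110-axi-dma";
    reg = <0x16008000 0x10000>;          /* placeholder address */
    clocks = <&clk 1>, <&clk 2>;         /* placeholder phandles */
    clock-names = "core-clk", "cfgr-clk";
    interrupts = <73>;                   /* placeholder specifier */
    resets = <&rst 1>, <&rst 2>;         /* two entries, per the if/then above */
    #dma-cells = <1>;
    dma-channels = <4>;
    snps,dma-masters = <1>;
    snps,data-width = <3>;
    snps,block-size = <65536 65536 65536 65536>;
    snps,priority = <0 1 2 3>;
    snps,axi-max-burst-len = <16>;
};
```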
@@ -43,7 +43,7 @@ description: |
   configuration of the legacy peripheral.
 
 allOf:
-  - $ref: "../dma-controller.yaml#"
+  - $ref: ../dma-controller.yaml#
   - $ref: /schemas/arm/keystone/ti,k3-sci-common.yaml#
 
 properties:
@@ -15,7 +15,7 @@ maintainers:
   - Michael Tretter <m.tretter@pengutronix.de>
 
 allOf:
-  - $ref: "../dma-controller.yaml#"
+  - $ref: ../dma-controller.yaml#
 
 properties:
   "#dma-cells":
@@ -16,7 +16,7 @@ maintainers:
   - Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 
 allOf:
-  - $ref: "../dma-controller.yaml#"
+  - $ref: ../dma-controller.yaml#
 
 properties:
   "#dma-cells":
@@ -623,6 +623,7 @@ config TEGRA186_GPC_DMA
 	depends on (ARCH_TEGRA || COMPILE_TEST) && ARCH_DMA_ADDR_T_64BIT
+	depends on IOMMU_API
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 	help
 	  Support for the NVIDIA Tegra General Purpose Central DMA controller.
 	  The DMA controller has multiple DMA channels which can be configured
@@ -187,6 +187,7 @@
 enum atc_status {
 	AT_XDMAC_CHAN_IS_CYCLIC = 0,
 	AT_XDMAC_CHAN_IS_PAUSED,
+	AT_XDMAC_CHAN_IS_PAUSED_INTERNAL,
 };
 
 struct at_xdmac_layout {

@@ -245,6 +246,7 @@ struct at_xdmac {
 	int			irq;
 	struct clk		*clk;
 	u32			save_gim;
+	u32			save_gs;
 	struct dma_pool		*at_xdmac_desc_pool;
 	const struct at_xdmac_layout	*layout;
 	struct at_xdmac_chan	chan[];

@@ -347,6 +349,11 @@ static inline int at_xdmac_chan_is_paused(struct at_xdmac_chan *atchan)
 	return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
 }
 
+static inline int at_xdmac_chan_is_paused_internal(struct at_xdmac_chan *atchan)
+{
+	return test_bit(AT_XDMAC_CHAN_IS_PAUSED_INTERNAL, &atchan->status);
+}
+
 static inline bool at_xdmac_chan_is_peripheral_xfer(u32 cfg)
 {
 	return cfg & AT_XDMAC_CC_TYPE_PER_TRAN;

@@ -412,7 +419,7 @@ static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan)
 	return ret;
 }
 
-static void at_xdmac_off(struct at_xdmac *atxdmac)
+static void at_xdmac_off(struct at_xdmac *atxdmac, bool suspend_descriptors)
 {
 	struct dma_chan		*chan, *_chan;
 	struct at_xdmac_chan	*atchan;

@@ -431,7 +438,7 @@ static void at_xdmac_off(struct at_xdmac *atxdmac)
 	at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L);
 
 	/* Decrement runtime PM ref counter for each active descriptor. */
-	if (!list_empty(&atxdmac->dma.channels)) {
+	if (!list_empty(&atxdmac->dma.channels) && suspend_descriptors) {
 		list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels,
 					 device_node) {
 			atchan = to_at_xdmac_chan(chan);

@@ -1898,6 +1905,26 @@ static int at_xdmac_device_config(struct dma_chan *chan,
 	return ret;
 }
 
+static void at_xdmac_device_pause_set(struct at_xdmac *atxdmac,
+				      struct at_xdmac_chan *atchan)
+{
+	at_xdmac_write(atxdmac, atxdmac->layout->grws, atchan->mask);
+	while (at_xdmac_chan_read(atchan, AT_XDMAC_CC) &
+	       (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
+		cpu_relax();
+}
+
+static void at_xdmac_device_pause_internal(struct at_xdmac_chan *atchan)
+{
+	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
+	unsigned long		flags;
+
+	spin_lock_irqsave(&atchan->lock, flags);
+	set_bit(AT_XDMAC_CHAN_IS_PAUSED_INTERNAL, &atchan->status);
+	at_xdmac_device_pause_set(atxdmac, atchan);
+	spin_unlock_irqrestore(&atchan->lock, flags);
+}
+
 static int at_xdmac_device_pause(struct dma_chan *chan)
 {
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);

@@ -1915,11 +1942,8 @@ static int at_xdmac_device_pause(struct dma_chan *chan)
 		return ret;
 
 	spin_lock_irqsave(&atchan->lock, flags);
-	at_xdmac_write(atxdmac, atxdmac->layout->grws, atchan->mask);
-	while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
-	       & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
-		cpu_relax();
 
+	at_xdmac_device_pause_set(atxdmac, atchan);
 	/* Decrement runtime PM ref counter for each active descriptor. */
 	at_xdmac_runtime_suspend_descriptors(atchan);
 
@@ -1931,6 +1955,17 @@ static int at_xdmac_device_pause(struct dma_chan *chan)
 	return 0;
 }
 
+static void at_xdmac_device_resume_internal(struct at_xdmac_chan *atchan)
+{
+	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
+	unsigned long		flags;
+
+	spin_lock_irqsave(&atchan->lock, flags);
+	at_xdmac_write(atxdmac, atxdmac->layout->grwr, atchan->mask);
+	clear_bit(AT_XDMAC_CHAN_IS_PAUSED_INTERNAL, &atchan->status);
+	spin_unlock_irqrestore(&atchan->lock, flags);
+}
+
 static int at_xdmac_device_resume(struct dma_chan *chan)
 {
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);

@@ -2118,19 +2153,26 @@ static int __maybe_unused atmel_xdmac_suspend(struct device *dev)
 
 		atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC);
 		if (at_xdmac_chan_is_cyclic(atchan)) {
-			if (!at_xdmac_chan_is_paused(atchan))
-				at_xdmac_device_pause(chan);
+			if (!at_xdmac_chan_is_paused(atchan)) {
+				dev_warn(chan2dev(chan), "%s: channel %d not paused\n",
+					 __func__, chan->chan_id);
+				at_xdmac_device_pause_internal(atchan);
+				at_xdmac_runtime_suspend_descriptors(atchan);
+			}
 			atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
 			atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
 			atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
 		}
-
-		at_xdmac_runtime_suspend_descriptors(atchan);
 	}
 	atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
+	atxdmac->save_gs = at_xdmac_read(atxdmac, AT_XDMAC_GS);
 
-	at_xdmac_off(atxdmac);
-	return pm_runtime_force_suspend(atxdmac->dev);
+	at_xdmac_off(atxdmac, false);
+	pm_runtime_mark_last_busy(atxdmac->dev);
+	pm_runtime_put_noidle(atxdmac->dev);
+	clk_disable_unprepare(atxdmac->clk);
+
+	return 0;
 }
 
 static int __maybe_unused atmel_xdmac_resume(struct device *dev)

@@ -2139,13 +2181,14 @@ static int __maybe_unused atmel_xdmac_resume(struct device *dev)
 	struct at_xdmac_chan	*atchan;
 	struct dma_chan		*chan, *_chan;
 	struct platform_device	*pdev = container_of(dev, struct platform_device, dev);
-	int			i;
-	int			ret;
+	int			i, ret;
 
-	ret = pm_runtime_force_resume(atxdmac->dev);
-	if (ret < 0)
+	ret = clk_prepare_enable(atxdmac->clk);
+	if (ret)
 		return ret;
 
+	pm_runtime_get_noresume(atxdmac->dev);
+
 	at_xdmac_axi_config(pdev);
 
 	/* Clear pending interrupts. */

@@ -2159,19 +2202,33 @@ static int __maybe_unused atmel_xdmac_resume(struct device *dev)
 	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
 		atchan = to_at_xdmac_chan(chan);
 
-		ret = at_xdmac_runtime_resume_descriptors(atchan);
-		if (ret < 0)
-			return ret;
-
 		at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
 		if (at_xdmac_chan_is_cyclic(atchan)) {
-			if (at_xdmac_chan_is_paused(atchan))
-				at_xdmac_device_resume(chan);
+			/*
+			 * Resume only channels not explicitly paused by
+			 * consumers.
+			 */
+			if (at_xdmac_chan_is_paused_internal(atchan)) {
+				ret = at_xdmac_runtime_resume_descriptors(atchan);
+				if (ret < 0)
+					return ret;
+				at_xdmac_device_resume_internal(atchan);
+			}
+
+			/*
+			 * We may resume from a deep sleep state where power
+			 * to the DMA controller is cut off. Thus, restore the
+			 * suspend state of channels set through the dmaengine API.
+			 */
+			else if (at_xdmac_chan_is_paused(atchan))
+				at_xdmac_device_pause_set(atxdmac, atchan);
+
 			at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
 			at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
 			at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
 			wmb();
-			at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
+			if (atxdmac->save_gs & atchan->mask)
+				at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
 		}
 	}
 
@@ -2312,7 +2369,7 @@ static int at_xdmac_probe(struct platform_device *pdev)
 	INIT_LIST_HEAD(&atxdmac->dma.channels);
 
 	/* Disable all chans and interrupts. */
-	at_xdmac_off(atxdmac);
+	at_xdmac_off(atxdmac, true);
 
 	for (i = 0; i < nr_channels; i++) {
 		struct at_xdmac_chan *atchan = &atxdmac->chan[i];

@@ -2376,7 +2433,7 @@ static int at_xdmac_remove(struct platform_device *pdev)
 	struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
 	int i;
 
-	at_xdmac_off(atxdmac);
+	at_xdmac_off(atxdmac, true);
 	of_dma_controller_free(pdev->dev.of_node);
 	dma_async_device_unregister(&atxdmac->dma);
 	pm_runtime_disable(atxdmac->dev);
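The new AT_XDMAC_CHAN_IS_PAUSED_INTERNAL bit separates channels the driver paused for suspend (resumed automatically above) from channels a consumer paused through the dmaengine API (left paused; only their pause state is re-programmed after a deep sleep). A minimal consumer-side sketch of the latter case, assuming a slave channel obtained elsewhere, e.g. via dma_request_chan():

```c
#include <linux/dmaengine.h>

/* Sketch: explicit consumer pause/resume around a low-power window.
 * After dmaengine_pause(), at_xdmac marks the channel
 * AT_XDMAC_CHAN_IS_PAUSED, so the controller's own suspend/resume
 * path will not silently restart it.
 */
static int quiesce_stream(struct dma_chan *chan)
{
	int ret;

	ret = dmaengine_pause(chan);	/* routes to at_xdmac_device_pause() */
	if (ret)
		return ret;

	/* ... the system may suspend and resume here ... */

	return dmaengine_resume(chan);	/* routes to at_xdmac_device_resume() */
}
```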
@@ -38,7 +38,7 @@ int bcom_sram_init(struct device_node *sram_node, char *owner)
 {
 	int rv;
-	const u32 *regaddr_p;
-	u64 regaddr64, size64;
+	struct resource res;
 	unsigned int psize;
 
 	/* Create our state struct */

@@ -56,21 +56,18 @@ int bcom_sram_init(struct device_node *sram_node, char *owner)
 	}
 
 	/* Get address and size of the sram */
-	regaddr_p = of_get_address(sram_node, 0, &size64, NULL);
-	if (!regaddr_p) {
+	rv = of_address_to_resource(sram_node, 0, &res);
+	if (rv) {
 		printk(KERN_ERR "%s: bcom_sram_init: "
 			"Invalid device node !\n", owner);
 		rv = -EINVAL;
 		goto error_free;
 	}
 
-	regaddr64 = of_translate_address(sram_node, regaddr_p);
-
-	bcom_sram->base_phys = (phys_addr_t) regaddr64;
-	bcom_sram->size = (unsigned int) size64;
+	bcom_sram->base_phys = res.start;
+	bcom_sram->size = resource_size(&res);
 
 	/* Request region */
-	if (!request_mem_region(bcom_sram->base_phys, bcom_sram->size, owner)) {
+	if (!request_mem_region(res.start, resource_size(&res), owner)) {
 		printk(KERN_ERR "%s: bcom_sram_init: "
 			"Couldn't request region !\n", owner);
 		rv = -EBUSY;

@@ -79,7 +76,7 @@ int bcom_sram_init(struct device_node *sram_node, char *owner)
 
 	/* Map SRAM */
 	/* sram is not really __iomem */
-	bcom_sram->base_virt = (void*) ioremap(bcom_sram->base_phys, bcom_sram->size);
+	bcom_sram->base_virt = (void *)ioremap(res.start, resource_size(&res));
 
 	if (!bcom_sram->base_virt) {
 		printk(KERN_ERR "%s: bcom_sram_init: "

@@ -120,7 +117,7 @@ int bcom_sram_init(struct device_node *sram_node, char *owner)
 	return 0;
 
 error_release:
-	release_mem_region(bcom_sram->base_phys, bcom_sram->size);
+	release_mem_region(res.start, resource_size(&res));
 error_free:
 	kfree(bcom_sram);
 	bcom_sram = NULL;
@@ -21,10 +21,12 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/of_dma.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/property.h>
+#include <linux/reset.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 
@@ -46,6 +48,10 @@
 	 DMA_SLAVE_BUSWIDTH_32_BYTES | \
 	 DMA_SLAVE_BUSWIDTH_64_BYTES)
 
+#define AXI_DMA_FLAG_HAS_APB_REGS	BIT(0)
+#define AXI_DMA_FLAG_HAS_RESETS		BIT(1)
+#define AXI_DMA_FLAG_USE_CFG2		BIT(2)
+
 static inline void
 axi_dma_iowrite32(struct axi_dma_chip *chip, u32 reg, u32 val)
 {

@@ -86,7 +92,8 @@ static inline void axi_chan_config_write(struct axi_dma_chan *chan,
 
 	cfg_lo = (config->dst_multblk_type << CH_CFG_L_DST_MULTBLK_TYPE_POS |
 		  config->src_multblk_type << CH_CFG_L_SRC_MULTBLK_TYPE_POS);
-	if (chan->chip->dw->hdata->reg_map_8_channels) {
+	if (chan->chip->dw->hdata->reg_map_8_channels &&
+	    !chan->chip->dw->hdata->use_cfg2) {
 		cfg_hi = config->tt_fc << CH_CFG_H_TT_FC_POS |
 			 config->hs_sel_src << CH_CFG_H_HS_SEL_SRC_POS |
 			 config->hs_sel_dst << CH_CFG_H_HS_SEL_DST_POS |

@@ -1140,7 +1147,7 @@ static int dma_chan_terminate_all(struct dma_chan *dchan)
 	axi_chan_disable(chan);
 
 	ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val,
-					!(val & chan_active), 1000, 10000);
+					!(val & chan_active), 1000, 50000);
 	if (ret == -ETIMEDOUT)
 		dev_warn(dchan2dev(dchan),
 			 "%s failed to stop\n", axi_chan_name(chan));

@@ -1367,10 +1374,11 @@ static int parse_device_properties(struct axi_dma_chip *chip)
 
 static int dw_probe(struct platform_device *pdev)
 {
-	struct device_node *node = pdev->dev.of_node;
 	struct axi_dma_chip *chip;
 	struct dw_axi_dma *dw;
 	struct dw_axi_dma_hcfg *hdata;
+	struct reset_control *resets;
+	unsigned int flags;
 	u32 i;
 	int ret;
 
@@ -1398,12 +1406,25 @@ static int dw_probe(struct platform_device *pdev)
 	if (IS_ERR(chip->regs))
 		return PTR_ERR(chip->regs);
 
-	if (of_device_is_compatible(node, "intel,kmb-axi-dma")) {
+	flags = (uintptr_t)of_device_get_match_data(&pdev->dev);
+	if (flags & AXI_DMA_FLAG_HAS_APB_REGS) {
 		chip->apb_regs = devm_platform_ioremap_resource(pdev, 1);
 		if (IS_ERR(chip->apb_regs))
 			return PTR_ERR(chip->apb_regs);
 	}
 
+	if (flags & AXI_DMA_FLAG_HAS_RESETS) {
+		resets = devm_reset_control_array_get_exclusive(&pdev->dev);
+		if (IS_ERR(resets))
+			return PTR_ERR(resets);
+
+		ret = reset_control_deassert(resets);
+		if (ret)
+			return ret;
+	}
+
+	chip->dw->hdata->use_cfg2 = !!(flags & AXI_DMA_FLAG_USE_CFG2);
+
 	chip->core_clk = devm_clk_get(chip->dev, "core-clk");
 	if (IS_ERR(chip->core_clk))
 		return PTR_ERR(chip->core_clk);

@@ -1554,8 +1575,15 @@ static const struct dev_pm_ops dw_axi_dma_pm_ops = {
 };
 
 static const struct of_device_id dw_dma_of_id_table[] = {
-	{ .compatible = "snps,axi-dma-1.01a" },
-	{ .compatible = "intel,kmb-axi-dma" },
+	{
+		.compatible = "snps,axi-dma-1.01a"
+	}, {
+		.compatible = "intel,kmb-axi-dma",
+		.data = (void *)AXI_DMA_FLAG_HAS_APB_REGS,
+	}, {
+		.compatible = "starfive,jh7110-axi-dma",
+		.data = (void *)(AXI_DMA_FLAG_HAS_RESETS | AXI_DMA_FLAG_USE_CFG2),
+	},
 	{}
 };
 MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
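The rewritten probe keys its behavior off per-compatible match data instead of of_device_is_compatible() string checks. A generic sketch of that match-data flags idiom, with an invented driver name and flags (these are not the dw-axi-dmac symbols):

```c
#include <linux/bits.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#define FOO_FLAG_HAS_APB	BIT(0)	/* hypothetical flags */
#define FOO_FLAG_HAS_RESETS	BIT(1)

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo-v1" },
	{ .compatible = "vendor,foo-v2",
	  .data = (void *)(FOO_FLAG_HAS_APB | FOO_FLAG_HAS_RESETS) },
	{ }
};

static int foo_probe(struct platform_device *pdev)
{
	/* NULL match data decodes to flags == 0, so "v1" needs no .data. */
	unsigned long flags = (uintptr_t)of_device_get_match_data(&pdev->dev);

	if (flags & FOO_FLAG_HAS_RESETS) {
		/* acquire and deassert resets, as dw_probe() does above */
	}
	return 0;
}
```

Packing flags into the pointer-sized .data field avoids a per-variant config struct when only a few booleans differ between compatibles.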
@@ -33,6 +33,7 @@ struct dw_axi_dma_hcfg {
 	/* Register map for DMAX_NUM_CHANNELS <= 8 */
 	bool	reg_map_8_channels;
 	bool	restrict_axi_burst_len;
+	bool	use_cfg2;
 };
 
 struct axi_dma_chan {
@@ -181,7 +181,7 @@ static void vchan_free_desc(struct virt_dma_desc *vdesc)
 	dw_edma_free_desc(vd2dw_edma_desc(vdesc));
 }
 
-static void dw_edma_start_transfer(struct dw_edma_chan *chan)
+static int dw_edma_start_transfer(struct dw_edma_chan *chan)
 {
 	struct dw_edma_chunk *child;
 	struct dw_edma_desc *desc;

@@ -189,16 +189,16 @@ static void dw_edma_start_transfer(struct dw_edma_chan *chan)
 
 	vd = vchan_next_desc(&chan->vc);
 	if (!vd)
-		return;
+		return 0;
 
 	desc = vd2dw_edma_desc(vd);
 	if (!desc)
-		return;
+		return 0;
 
 	child = list_first_entry_or_null(&desc->chunk->list,
 					 struct dw_edma_chunk, list);
 	if (!child)
-		return;
+		return 0;
 
 	dw_edma_v0_core_start(child, !desc->xfer_sz);
 	desc->xfer_sz += child->ll_region.sz;

@@ -206,6 +206,8 @@ static void dw_edma_start_transfer(struct dw_edma_chan *chan)
 	list_del(&child->list);
 	kfree(child);
 	desc->chunks_alloc--;
+
+	return 1;
 }
 
 static void dw_edma_device_caps(struct dma_chan *dchan,

@@ -306,9 +308,12 @@ static void dw_edma_device_issue_pending(struct dma_chan *dchan)
 	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
 	unsigned long flags;
 
+	if (!chan->configured)
+		return;
+
 	spin_lock_irqsave(&chan->vc.lock, flags);
-	if (chan->configured && chan->request == EDMA_REQ_NONE &&
-	    chan->status == EDMA_ST_IDLE && vchan_issue_pending(&chan->vc)) {
+	if (vchan_issue_pending(&chan->vc) && chan->request == EDMA_REQ_NONE &&
+	    chan->status == EDMA_ST_IDLE) {
 		chan->status = EDMA_ST_BUSY;
 		dw_edma_start_transfer(chan);
 	}

@@ -602,14 +607,14 @@ static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
 		switch (chan->request) {
 		case EDMA_REQ_NONE:
 			desc = vd2dw_edma_desc(vd);
-			if (desc->chunks_alloc) {
-				chan->status = EDMA_ST_BUSY;
-				dw_edma_start_transfer(chan);
-			} else {
+			if (!desc->chunks_alloc) {
 				list_del(&vd->node);
 				vchan_cookie_complete(vd);
-				chan->status = EDMA_ST_IDLE;
 			}
+
+			/* Continue transferring if there are remaining chunks or issued requests.
+			 */
+			chan->status = dw_edma_start_transfer(chan) ? EDMA_ST_BUSY : EDMA_ST_IDLE;
 			break;
 
 		case EDMA_REQ_STOP:
@@ -159,62 +159,6 @@ static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
 #define GET_CH_32(dw, dir, ch, name) \
 	readl_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))
 
-static inline void writeq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
-			     u64 value, void __iomem *addr)
-{
-	if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
-		u32 viewport_sel;
-		unsigned long flags;
-
-		raw_spin_lock_irqsave(&dw->lock, flags);
-
-		viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
-		if (dir == EDMA_DIR_READ)
-			viewport_sel |= BIT(31);
-
-		writel(viewport_sel,
-		       &(__dw_regs(dw)->type.legacy.viewport_sel));
-		writeq(value, addr);
-
-		raw_spin_unlock_irqrestore(&dw->lock, flags);
-	} else {
-		writeq(value, addr);
-	}
-}
-
-static inline u64 readq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
-			   const void __iomem *addr)
-{
-	u64 value;
-
-	if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
-		u32 viewport_sel;
-		unsigned long flags;
-
-		raw_spin_lock_irqsave(&dw->lock, flags);
-
-		viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
-		if (dir == EDMA_DIR_READ)
-			viewport_sel |= BIT(31);
-
-		writel(viewport_sel,
-		       &(__dw_regs(dw)->type.legacy.viewport_sel));
-		value = readq(addr);
-
-		raw_spin_unlock_irqrestore(&dw->lock, flags);
-	} else {
-		value = readq(addr);
-	}
-
-	return value;
-}
-
-#define SET_CH_64(dw, dir, ch, name, value) \
-	writeq_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name))
-
-#define GET_CH_64(dw, dir, ch, name) \
-	readq_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))
-
 /* eDMA management callbacks */
 void dw_edma_v0_core_off(struct dw_edma *dw)
 {
@@ -1,7 +1,7 @@
 ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=IDXD
 
 obj-$(CONFIG_INTEL_IDXD) += idxd.o
-idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o
+idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o debugfs.o
 
 idxd-$(CONFIG_INTEL_IDXD_PERFMON) += perfmon.o
@@ -11,7 +11,9 @@
 #include <linux/fs.h>
 #include <linux/poll.h>
 #include <linux/iommu.h>
+#include <linux/highmem.h>
 #include <uapi/linux/idxd.h>
+#include <linux/xarray.h>
 #include "registers.h"
 #include "idxd.h"
 
@@ -21,6 +23,13 @@ struct idxd_cdev_context {
 	struct ida minor_ida;
 };
 
+/*
+ * Since user file names are global in DSA devices, define their IDAs as
+ * global to avoid conflicting file names.
+ */
+static DEFINE_IDA(file_ida);
+static DEFINE_MUTEX(ida_lock);
+
 /*
  * ictx is an array based off of accelerator types. enum idxd_type
  * is used as index

@@ -34,8 +43,119 @@ struct idxd_user_context {
 	struct idxd_wq *wq;
 	struct task_struct *task;
 	unsigned int pasid;
+	struct mm_struct *mm;
 	unsigned int flags;
 	struct iommu_sva *sva;
+	struct idxd_dev idxd_dev;
+	u64 counters[COUNTER_MAX];
+	int id;
+	pid_t pid;
 };
 
+static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid);
+static void idxd_xa_pasid_remove(struct idxd_user_context *ctx);
+
+static inline struct idxd_user_context *dev_to_uctx(struct device *dev)
+{
+	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
+
+	return container_of(idxd_dev, struct idxd_user_context, idxd_dev);
+}
+
+static ssize_t cr_faults_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct idxd_user_context *ctx = dev_to_uctx(dev);
+
+	return sysfs_emit(buf, "%llu\n", ctx->counters[COUNTER_FAULTS]);
+}
+static DEVICE_ATTR_RO(cr_faults);
+
+static ssize_t cr_fault_failures_show(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct idxd_user_context *ctx = dev_to_uctx(dev);
+
+	return sysfs_emit(buf, "%llu\n", ctx->counters[COUNTER_FAULT_FAILS]);
+}
+static DEVICE_ATTR_RO(cr_fault_failures);
+
+static ssize_t pid_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct idxd_user_context *ctx = dev_to_uctx(dev);
+
+	return sysfs_emit(buf, "%u\n", ctx->pid);
+}
+static DEVICE_ATTR_RO(pid);
+
+static struct attribute *cdev_file_attributes[] = {
+	&dev_attr_cr_faults.attr,
+	&dev_attr_cr_fault_failures.attr,
+	&dev_attr_pid.attr,
+	NULL
+};
+
+static umode_t cdev_file_attr_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+	struct device *dev = container_of(kobj, typeof(*dev), kobj);
+	struct idxd_user_context *ctx = dev_to_uctx(dev);
+	struct idxd_wq *wq = ctx->wq;
+
+	if (!wq_pasid_enabled(wq))
+		return 0;
+
+	return a->mode;
+}
+
+static const struct attribute_group cdev_file_attribute_group = {
+	.attrs = cdev_file_attributes,
+	.is_visible = cdev_file_attr_visible,
+};
+
+static const struct attribute_group *cdev_file_attribute_groups[] = {
+	&cdev_file_attribute_group,
+	NULL
+};
+
+static void idxd_file_dev_release(struct device *dev)
+{
+	struct idxd_user_context *ctx = dev_to_uctx(dev);
+	struct idxd_wq *wq = ctx->wq;
+	struct idxd_device *idxd = wq->idxd;
+	int rc;
+
+	mutex_lock(&ida_lock);
+	ida_free(&file_ida, ctx->id);
+	mutex_unlock(&ida_lock);
+
+	/* Wait for in-flight operations to complete. */
+	if (wq_shared(wq)) {
+		idxd_device_drain_pasid(idxd, ctx->pasid);
+	} else {
+		if (device_user_pasid_enabled(idxd)) {
+			/* The wq disable in the disable pasid function will drain the wq */
+			rc = idxd_wq_disable_pasid(wq);
+			if (rc < 0)
+				dev_err(dev, "wq disable pasid failed.\n");
+		} else {
+			idxd_wq_drain(wq);
+		}
+	}
+
+	if (ctx->sva) {
+		idxd_cdev_evl_drain_pasid(wq, ctx->pasid);
+		iommu_sva_unbind_device(ctx->sva);
+		idxd_xa_pasid_remove(ctx);
+	}
+	kfree(ctx);
+	mutex_lock(&wq->wq_lock);
+	idxd_wq_put(wq);
+	mutex_unlock(&wq->wq_lock);
+}
+
+static struct device_type idxd_cdev_file_type = {
+	.name = "idxd_file",
+	.release = idxd_file_dev_release,
+	.groups = cdev_file_attribute_groups,
+};
+
 static void idxd_cdev_dev_release(struct device *dev)

@@ -68,15 +188,46 @@ static inline struct idxd_wq *inode_wq(struct inode *inode)
 	return idxd_cdev->wq;
 }
 
+static void idxd_xa_pasid_remove(struct idxd_user_context *ctx)
+{
+	struct idxd_wq *wq = ctx->wq;
+	void *ptr;
+
+	mutex_lock(&wq->uc_lock);
+	ptr = xa_cmpxchg(&wq->upasid_xa, ctx->pasid, ctx, NULL, GFP_KERNEL);
+	if (ptr != (void *)ctx)
+		dev_warn(&wq->idxd->pdev->dev, "xarray cmpxchg failed for pasid %u\n",
+			 ctx->pasid);
+	mutex_unlock(&wq->uc_lock);
+}
+
+void idxd_user_counter_increment(struct idxd_wq *wq, u32 pasid, int index)
+{
+	struct idxd_user_context *ctx;
+
+	if (index >= COUNTER_MAX)
+		return;
+
+	mutex_lock(&wq->uc_lock);
+	ctx = xa_load(&wq->upasid_xa, pasid);
+	if (!ctx) {
+		mutex_unlock(&wq->uc_lock);
+		return;
+	}
+	ctx->counters[index]++;
+	mutex_unlock(&wq->uc_lock);
+}
+
 static int idxd_cdev_open(struct inode *inode, struct file *filp)
 {
 	struct idxd_user_context *ctx;
 	struct idxd_device *idxd;
 	struct idxd_wq *wq;
-	struct device *dev;
+	struct device *dev, *fdev;
 	int rc = 0;
 	struct iommu_sva *sva;
 	unsigned int pasid;
+	struct idxd_cdev *idxd_cdev;
 
 	wq = inode_wq(inode);
 	idxd = wq->idxd;

@@ -97,6 +248,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
 
 	ctx->wq = wq;
 	filp->private_data = ctx;
+	ctx->pid = current->pid;
 
 	if (device_user_pasid_enabled(idxd)) {
 		sva = iommu_sva_bind_device(dev, current->mm);

@@ -108,65 +260,118 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
 
 		pasid = iommu_sva_get_pasid(sva);
 		if (pasid == IOMMU_PASID_INVALID) {
-			iommu_sva_unbind_device(sva);
 			rc = -EINVAL;
-			goto failed;
+			goto failed_get_pasid;
 		}
 
 		ctx->sva = sva;
 		ctx->pasid = pasid;
+		ctx->mm = current->mm;
+
+		mutex_lock(&wq->uc_lock);
+		rc = xa_insert(&wq->upasid_xa, pasid, ctx, GFP_KERNEL);
+		mutex_unlock(&wq->uc_lock);
+		if (rc < 0)
+			dev_warn(dev, "PASID entry already exist in xarray.\n");
 
 		if (wq_dedicated(wq)) {
 			rc = idxd_wq_set_pasid(wq, pasid);
 			if (rc < 0) {
 				iommu_sva_unbind_device(sva);
 				dev_err(dev, "wq set pasid failed: %d\n", rc);
-				goto failed;
+				goto failed_set_pasid;
 			}
 		}
 	}
 
+	idxd_cdev = wq->idxd_cdev;
+	mutex_lock(&ida_lock);
+	ctx->id = ida_alloc(&file_ida, GFP_KERNEL);
+	mutex_unlock(&ida_lock);
+	if (ctx->id < 0) {
+		dev_warn(dev, "ida alloc failure\n");
+		goto failed_ida;
+	}
+	ctx->idxd_dev.type = IDXD_DEV_CDEV_FILE;
+	fdev = user_ctx_dev(ctx);
+	device_initialize(fdev);
+	fdev->parent = cdev_dev(idxd_cdev);
+	fdev->bus = &dsa_bus_type;
+	fdev->type = &idxd_cdev_file_type;
+
+	rc = dev_set_name(fdev, "file%d", ctx->id);
+	if (rc < 0) {
+		dev_warn(dev, "set name failure\n");
+		goto failed_dev_name;
+	}
+
+	rc = device_add(fdev);
+	if (rc < 0) {
+		dev_warn(dev, "file device add failure\n");
+		goto failed_dev_add;
+	}
+
 	idxd_wq_get(wq);
 	mutex_unlock(&wq->wq_lock);
 	return 0;
 
-failed:
+failed_dev_add:
+failed_dev_name:
+	put_device(fdev);
+failed_ida:
+failed_set_pasid:
+	if (device_user_pasid_enabled(idxd))
+		idxd_xa_pasid_remove(ctx);
+failed_get_pasid:
+	if (device_user_pasid_enabled(idxd))
+		iommu_sva_unbind_device(sva);
+failed:
 	mutex_unlock(&wq->wq_lock);
 	kfree(ctx);
 	return rc;
 }
 
+static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
+{
+	struct idxd_device *idxd = wq->idxd;
+	struct idxd_evl *evl = idxd->evl;
+	union evl_status_reg status;
+	u16 h, t, size;
+	int ent_size = evl_ent_size(idxd);
+	struct __evl_entry *entry_head;
+
+	if (!evl)
+		return;
+
+	spin_lock(&evl->lock);
+	status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
+	t = status.tail;
+	h = evl->head;
+	size = evl->size;
+
+	while (h != t) {
+		entry_head = (struct __evl_entry *)(evl->log + (h * ent_size));
+		if (entry_head->pasid == pasid && entry_head->wq_idx == wq->id)
+			set_bit(h, evl->bmap);
+		h = (h + 1) % size;
+	}
+	spin_unlock(&evl->lock);
+
+	drain_workqueue(wq->wq);
+}
+
 static int idxd_cdev_release(struct inode *node, struct file *filep)
 {
 	struct idxd_user_context *ctx = filep->private_data;
 	struct idxd_wq *wq = ctx->wq;
 	struct idxd_device *idxd = wq->idxd;
 	struct device *dev = &idxd->pdev->dev;
-	int rc;
 
 	dev_dbg(dev, "%s called\n", __func__);
 	filep->private_data = NULL;
 
-	/* Wait for in-flight operations to complete. */
-	if (wq_shared(wq)) {
-		idxd_device_drain_pasid(idxd, ctx->pasid);
-	} else {
-		if (device_user_pasid_enabled(idxd)) {
-			/* The wq disable in the disable pasid function will drain the wq */
-			rc = idxd_wq_disable_pasid(wq);
-			if (rc < 0)
-				dev_err(dev, "wq disable pasid failed.\n");
-		} else {
-			idxd_wq_drain(wq);
-		}
-	}
+	device_unregister(user_ctx_dev(ctx));
 
-	if (ctx->sva)
-		iommu_sva_unbind_device(ctx->sva);
-	kfree(ctx);
-	mutex_lock(&wq->wq_lock);
-	idxd_wq_put(wq);
-	mutex_unlock(&wq->wq_lock);
 	return 0;
 }

@@ -297,6 +502,7 @@ void idxd_wq_del_cdev(struct idxd_wq *wq)
 	struct idxd_cdev *idxd_cdev;
 
 	idxd_cdev = wq->idxd_cdev;
+	ida_destroy(&file_ida);
 	wq->idxd_cdev = NULL;
 	cdev_device_del(&idxd_cdev->cdev, cdev_dev(idxd_cdev));
 	put_device(cdev_dev(idxd_cdev));

@@ -330,6 +536,13 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
 	}
 
 	mutex_lock(&wq->wq_lock);
+
+	wq->wq = create_workqueue(dev_name(wq_confdev(wq)));
+	if (!wq->wq) {
+		rc = -ENOMEM;
+		goto wq_err;
+	}
+
 	wq->type = IDXD_WQT_USER;
 	rc = drv_enable_wq(wq);
 	if (rc < 0)

@@ -348,7 +561,9 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
 err_cdev:
 	drv_disable_wq(wq);
 err:
+	destroy_workqueue(wq->wq);
 	wq->type = IDXD_WQT_NONE;
+wq_err:
 	mutex_unlock(&wq->wq_lock);
 	return rc;
 }

@@ -361,6 +576,8 @@ static void idxd_user_drv_remove(struct idxd_dev *idxd_dev)
 	idxd_wq_del_cdev(wq);
 	drv_disable_wq(wq);
 	wq->type = IDXD_WQT_NONE;
+	destroy_workqueue(wq->wq);
+	wq->wq = NULL;
 	mutex_unlock(&wq->wq_lock);
 }

@@ -407,3 +624,70 @@ void idxd_cdev_remove(void)
 		ida_destroy(&ictx[i].minor_ida);
 	}
 }
+
+/**
+ * idxd_copy_cr - copy completion record to user address space found by wq and
+ *		  PASID
+ * @wq:		work queue
+ * @pasid:	PASID
+ * @addr:	user fault address to write
+ * @cr:		completion record
+ * @len:	number of bytes to copy
+ *
+ * This is called by a work that handles completion record fault.
+ *
+ * Return: number of bytes copied.
+ */
+int idxd_copy_cr(struct idxd_wq *wq, ioasid_t pasid, unsigned long addr,
+		 void *cr, int len)
+{
+	struct device *dev = &wq->idxd->pdev->dev;
+	int left = len, status_size = 1;
+	struct idxd_user_context *ctx;
+	struct mm_struct *mm;
+
+	mutex_lock(&wq->uc_lock);
+
+	ctx = xa_load(&wq->upasid_xa, pasid);
+	if (!ctx) {
+		dev_warn(dev, "No user context\n");
+		goto out;
+	}
+
+	mm = ctx->mm;
+	/*
+	 * The completion record fault handling work is running in kernel
+	 * thread context. It temporarily switches to the mm to copy cr
+	 * to addr in the mm.
+	 */
+	kthread_use_mm(mm);
+	left = copy_to_user((void __user *)addr + status_size, cr + status_size,
+			    len - status_size);
+	/*
+	 * Copy status only after the rest of completion record is copied
+	 * successfully so that the user gets the complete completion record
+	 * when a non-zero status is polled.
+	 */
+	if (!left) {
+		u8 status;
+
+		/*
+		 * Ensure that the completion record's status field is written
+		 * after the rest of the completion record has been written.
+		 * This ensures that the user receives the correct completion
+		 * record information once polling for a non-zero status.
+		 */
+		wmb();
+		status = *(u8 *)cr;
+		if (put_user(status, (u8 __user *)addr))
+			left += status_size;
+	} else {
+		left += status_size;
+	}
+	kthread_unuse_mm(mm);
+
+out:
+	mutex_unlock(&wq->uc_lock);
+
+	return len - left;
+}
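idxd_copy_cr() deliberately copies the completion record body first and stores the status byte last, behind a wmb(), so a user-space poller that observes a non-zero status can trust the rest of the record. A hedged sketch of the matching user-space read side (the record layout is reduced to "status is byte 0"; the spin hint is an x86 assumption):

```c
#include <stdint.h>

/* Sketch: poll a DSA-style completion record whose first byte is the
 * status. Because the kernel fault path above stores the status byte
 * last, once status != 0 the remaining fields are already visible.
 */
static inline uint8_t wait_for_completion_record(volatile uint8_t *cr)
{
	uint8_t status;

	while ((status = cr[0]) == 0)
		__builtin_ia32_pause();	/* x86 spin hint (assumption) */

	/* Pair an acquire fence with the kernel's write ordering. */
	__atomic_thread_fence(__ATOMIC_ACQUIRE);
	return status;
}
```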
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2021 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/debugfs.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <uapi/linux/idxd.h>
+#include "idxd.h"
+#include "registers.h"
+
+static struct dentry *idxd_debugfs_dir;
+
+static void dump_event_entry(struct idxd_device *idxd, struct seq_file *s,
+			     u16 index, int *count, bool processed)
+{
+	struct idxd_evl *evl = idxd->evl;
+	struct dsa_evl_entry *entry;
+	struct dsa_completion_record *cr;
+	u64 *raw;
+	int i;
+	int evl_strides = evl_ent_size(idxd) / sizeof(u64);
+
+	entry = (struct dsa_evl_entry *)evl->log + index;
+
+	if (!entry->e.desc_valid)
+		return;
+
+	seq_printf(s, "Event Log entry %d (real index %u) processed: %u\n",
+		   *count, index, processed);
+
+	seq_printf(s, "desc valid %u wq idx valid %u\n"
+		   "batch %u fault rw %u priv %u error 0x%x\n"
+		   "wq idx %u op %#x pasid %u batch idx %u\n"
+		   "fault addr %#llx\n",
+		   entry->e.desc_valid, entry->e.wq_idx_valid,
+		   entry->e.batch, entry->e.fault_rw, entry->e.priv,
+		   entry->e.error, entry->e.wq_idx, entry->e.operation,
+		   entry->e.pasid, entry->e.batch_idx, entry->e.fault_addr);
+
+	cr = &entry->cr;
+	seq_printf(s, "status %#x result %#x fault_info %#x bytes_completed %u\n"
+		   "fault addr %#llx inv flags %#x\n\n",
+		   cr->status, cr->result, cr->fault_info, cr->bytes_completed,
+		   cr->fault_addr, cr->invalid_flags);
+
+	raw = (u64 *)entry;
+
+	for (i = 0; i < evl_strides; i++)
+		seq_printf(s, "entry[%d] = %#llx\n", i, raw[i]);
+
+	seq_puts(s, "\n");
+	*count += 1;
+}
+
+static int debugfs_evl_show(struct seq_file *s, void *d)
+{
+	struct idxd_device *idxd = s->private;
+	struct idxd_evl *evl = idxd->evl;
+	union evl_status_reg evl_status;
+	u16 h, t, evl_size, i;
+	int count = 0;
+	bool processed = true;
+
+	if (!evl || !evl->log)
+		return 0;
+
+	spin_lock(&evl->lock);
+
+	h = evl->head;
+	evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
+	t = evl_status.tail;
+	evl_size = evl->size;
+
+	seq_printf(s, "Event Log head %u tail %u interrupt pending %u\n\n",
+		   evl_status.head, evl_status.tail, evl_status.int_pending);
+
+	i = t;
+	while (1) {
+		i = (i + 1) % evl_size;
+		if (i == t)
+			break;
+
+		if (processed && i == h)
+			processed = false;
+		dump_event_entry(idxd, s, i, &count, processed);
+	}
+
+	spin_unlock(&evl->lock);
+	return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(debugfs_evl);
+
+int idxd_device_init_debugfs(struct idxd_device *idxd)
+{
+	if (IS_ERR_OR_NULL(idxd_debugfs_dir))
+		return 0;
+
+	idxd->dbgfs_dir = debugfs_create_dir(dev_name(idxd_confdev(idxd)), idxd_debugfs_dir);
+	if (IS_ERR(idxd->dbgfs_dir))
+		return PTR_ERR(idxd->dbgfs_dir);
+
+	if (idxd->evl) {
+		idxd->dbgfs_evl_file = debugfs_create_file("event_log", 0400,
+							   idxd->dbgfs_dir, idxd,
+							   &debugfs_evl_fops);
+		if (IS_ERR(idxd->dbgfs_evl_file)) {
+			debugfs_remove_recursive(idxd->dbgfs_dir);
+			idxd->dbgfs_dir = NULL;
+			return PTR_ERR(idxd->dbgfs_evl_file);
+		}
+	}
+
+	return 0;
+}
+
+void idxd_device_remove_debugfs(struct idxd_device *idxd)
+{
+	debugfs_remove_recursive(idxd->dbgfs_dir);
+}
+
+int idxd_init_debugfs(void)
+{
+	if (!debugfs_initialized())
+		return 0;
+
+	idxd_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+	if (IS_ERR(idxd_debugfs_dir))
+		return PTR_ERR(idxd_debugfs_dir);
+	return 0;
+}
+
+void idxd_remove_debugfs(void)
+{
+	debugfs_remove_recursive(idxd_debugfs_dir);
+}
@@ -752,6 +752,101 @@ void idxd_device_clear_state(struct idxd_device *idxd)
 	spin_unlock(&idxd->dev_lock);
 }
 
+static int idxd_device_evl_setup(struct idxd_device *idxd)
+{
+	union gencfg_reg gencfg;
+	union evlcfg_reg evlcfg;
+	union genctrl_reg genctrl;
+	struct device *dev = &idxd->pdev->dev;
+	void *addr;
+	dma_addr_t dma_addr;
+	int size;
+	struct idxd_evl *evl = idxd->evl;
+	unsigned long *bmap;
+	int rc;
+
+	if (!evl)
+		return 0;
+
+	size = evl_size(idxd);
+
+	bmap = bitmap_zalloc(size, GFP_KERNEL);
+	if (!bmap) {
+		rc = -ENOMEM;
+		goto err_bmap;
+	}
+
+	/*
+	 * Address needs to be page aligned. However, dma_alloc_coherent() provides
+	 * at minimal page size aligned address. No manual alignment required.
+	 */
+	addr = dma_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
+	if (!addr) {
+		rc = -ENOMEM;
+		goto err_alloc;
+	}
+
+	memset(addr, 0, size);
+
+	spin_lock(&evl->lock);
+	evl->log = addr;
+	evl->dma = dma_addr;
+	evl->log_size = size;
+	evl->bmap = bmap;
+
+	memset(&evlcfg, 0, sizeof(evlcfg));
+	evlcfg.bits[0] = dma_addr & GENMASK(63, 12);
+	evlcfg.size = evl->size;
+
+	iowrite64(evlcfg.bits[0], idxd->reg_base + IDXD_EVLCFG_OFFSET);
+	iowrite64(evlcfg.bits[1], idxd->reg_base + IDXD_EVLCFG_OFFSET + 8);
+
+	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
+	genctrl.evl_int_en = 1;
+	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
+
+	gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
+	gencfg.evl_en = 1;
+	iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
+
+	spin_unlock(&evl->lock);
+	return 0;
+
+err_alloc:
+	bitmap_free(bmap);
+err_bmap:
+	return rc;
+}
+
+static void idxd_device_evl_free(struct idxd_device *idxd)
+{
+	union gencfg_reg gencfg;
+	union genctrl_reg genctrl;
+	struct device *dev = &idxd->pdev->dev;
+	struct idxd_evl *evl = idxd->evl;
+
+	gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
+	if (!gencfg.evl_en)
+		return;
+
+	spin_lock(&evl->lock);
+	gencfg.evl_en = 0;
+	iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
+
+	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
+	genctrl.evl_int_en = 0;
+	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
+
+	iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET);
+	iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET + 8);
+
+	dma_free_coherent(dev, evl->log_size, evl->log, evl->dma);
+	bitmap_free(evl->bmap);
+	evl->log = NULL;
+	evl->size = IDXD_EVL_SIZE_MIN;
+	spin_unlock(&evl->lock);
+}
+
 static void idxd_group_config_write(struct idxd_group *group)
 {
 	struct idxd_device *idxd = group->idxd;

@@ -872,12 +967,16 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
 	wq->wqcfg->priority = wq->priority;
 
 	if (idxd->hw.gen_cap.block_on_fault &&
-	    test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags))
+	    test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags) &&
+	    !test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags))
 		wq->wqcfg->bof = 1;
 
 	if (idxd->hw.wq_cap.wq_ats_support)
 		wq->wqcfg->wq_ats_disable = test_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
 
+	if (idxd->hw.wq_cap.wq_prs_support)
+		wq->wqcfg->wq_prs_disable = test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags);
+
 	/* bytes 12-15 */
 	wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
 	idxd_wqcfg_set_max_batch_shift(idxd->data->type, wq->wqcfg, ilog2(wq->max_batch_size));

@@ -1451,15 +1550,24 @@ int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
 	if (rc < 0)
 		return -ENXIO;
 
+	rc = idxd_device_evl_setup(idxd);
+	if (rc < 0) {
+		idxd->cmd_status = IDXD_SCMD_DEV_EVL_ERR;
+		return rc;
+	}
+
 	/* Start device */
 	rc = idxd_device_enable(idxd);
-	if (rc < 0)
+	if (rc < 0) {
+		idxd_device_evl_free(idxd);
 		return rc;
+	}
 
 	/* Setup DMA device without channels */
 	rc = idxd_register_dma_device(idxd);
 	if (rc < 0) {
 		idxd_device_disable(idxd);
+		idxd_device_evl_free(idxd);
 		idxd->cmd_status = IDXD_SCMD_DEV_DMA_ERR;
 		return rc;
 	}

@@ -1488,6 +1596,7 @@ void idxd_device_drv_remove(struct idxd_dev *idxd_dev)
 	idxd_device_disable(idxd);
 	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
 		idxd_device_reset(idxd);
+	idxd_device_evl_free(idxd);
 }
 
 static enum idxd_dev_type dev_types[] = {
@ -32,6 +32,7 @@ enum idxd_dev_type {
|
|||
IDXD_DEV_GROUP,
|
||||
IDXD_DEV_ENGINE,
|
||||
IDXD_DEV_CDEV,
|
||||
IDXD_DEV_CDEV_FILE,
|
||||
IDXD_DEV_MAX_TYPE,
|
||||
};
|
||||
|
||||
|
@ -127,6 +128,12 @@ struct idxd_pmu {
|
|||
|
||||
#define IDXD_MAX_PRIORITY 0xf
|
||||
|
||||
enum {
|
||||
COUNTER_FAULTS = 0,
|
||||
COUNTER_FAULT_FAILS,
|
||||
COUNTER_MAX
|
||||
};
|
||||
|
||||
enum idxd_wq_state {
|
||||
IDXD_WQ_DISABLED = 0,
|
||||
IDXD_WQ_ENABLED,
|
||||
|
@ -136,6 +143,7 @@ enum idxd_wq_flag {
|
|||
WQ_FLAG_DEDICATED = 0,
|
||||
WQ_FLAG_BLOCK_ON_FAULT,
|
||||
WQ_FLAG_ATS_DISABLE,
|
||||
WQ_FLAG_PRS_DISABLE,
|
||||
};
|
||||
|
||||
enum idxd_wq_type {
|
||||
|
@ -185,6 +193,7 @@ struct idxd_wq {
|
|||
struct idxd_dev idxd_dev;
|
||||
struct idxd_cdev *idxd_cdev;
|
||||
struct wait_queue_head err_queue;
|
||||
struct workqueue_struct *wq;
|
||||
struct idxd_device *idxd;
|
||||
int id;
|
||||
struct idxd_irq_entry ie;
|
||||
|
@ -214,6 +223,10 @@ struct idxd_wq {
|
|||
char name[WQ_NAME_SIZE + 1];
|
||||
u64 max_xfer_bytes;
|
||||
u32 max_batch_size;
|
||||
|
||||
/* Lock to protect upasid_xa access. */
|
||||
	struct mutex uc_lock;
	struct xarray upasid_xa;
};

struct idxd_engine {

@@ -232,6 +245,7 @@ struct idxd_hw {
	union engine_cap_reg engine_cap;
	struct opcap opcap;
	u32 cmd_cap;
	union iaa_cap_reg iaa_cap;
};

enum idxd_device_state {

@@ -258,6 +272,32 @@ struct idxd_driver_data {
	struct device_type *dev_type;
	int compl_size;
	int align;
	int evl_cr_off;
	int cr_status_off;
	int cr_result_off;
};

struct idxd_evl {
	/* Lock to protect event log access. */
	spinlock_t lock;
	void *log;
	dma_addr_t dma;
	/* Total size of event log = number of entries * entry size. */
	unsigned int log_size;
	/* The number of entries in the event log. */
	u16 size;
	u16 head;
	unsigned long *bmap;
	bool batch_fail[IDXD_MAX_BATCH_IDENT];
};

struct idxd_evl_fault {
	struct work_struct work;
	struct idxd_wq *wq;
	u8 status;

	/* this must always be the last member */
	struct __evl_entry entry[];
};

struct idxd_device {

@@ -316,8 +356,24 @@ struct idxd_device {
	struct idxd_pmu *idxd_pmu;

	unsigned long *opcap_bmap;
	struct idxd_evl *evl;
	struct kmem_cache *evl_cache;

	struct dentry *dbgfs_dir;
	struct dentry *dbgfs_evl_file;
};

static inline unsigned int evl_ent_size(struct idxd_device *idxd)
{
	return idxd->hw.gen_cap.evl_support ?
	       (32 * (1 << idxd->hw.gen_cap.evl_support)) : 0;
}

static inline unsigned int evl_size(struct idxd_device *idxd)
{
	return idxd->evl->size * evl_ent_size(idxd);
}

/* IDXD software descriptor */
struct idxd_desc {
	union {

@@ -351,6 +407,7 @@ enum idxd_completion_status {
#define engine_confdev(engine) &engine->idxd_dev.conf_dev
#define group_confdev(group) &group->idxd_dev.conf_dev
#define cdev_dev(cdev) &cdev->idxd_dev.conf_dev
#define user_ctx_dev(ctx) (&(ctx)->idxd_dev.conf_dev)

#define confdev_to_idxd_dev(dev) container_of(dev, struct idxd_dev, conf_dev)
#define idxd_dev_to_idxd(idxd_dev) container_of(idxd_dev, struct idxd_device, idxd_dev)

@@ -598,6 +655,7 @@ int idxd_register_driver(void);
void idxd_unregister_driver(void);
void idxd_wqs_quiesce(struct idxd_device *idxd);
bool idxd_queue_int_handle_resubmit(struct idxd_desc *desc);
void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count);

/* device interrupt control */
irqreturn_t idxd_misc_thread(int vec, void *data);

@@ -662,6 +720,9 @@ void idxd_cdev_remove(void);
int idxd_cdev_get_major(struct idxd_device *idxd);
int idxd_wq_add_cdev(struct idxd_wq *wq);
void idxd_wq_del_cdev(struct idxd_wq *wq);
int idxd_copy_cr(struct idxd_wq *wq, ioasid_t pasid, unsigned long addr,
		 void *buf, int len);
void idxd_user_counter_increment(struct idxd_wq *wq, u32 pasid, int index);

/* perfmon */
#if IS_ENABLED(CONFIG_INTEL_IDXD_PERFMON)

@@ -678,4 +739,10 @@ static inline void perfmon_init(void) {}
static inline void perfmon_exit(void) {}
#endif

/* debugfs */
int idxd_device_init_debugfs(struct idxd_device *idxd);
void idxd_device_remove_debugfs(struct idxd_device *idxd);
int idxd_init_debugfs(void);
void idxd_remove_debugfs(void);

#endif
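
A note on the event-log sizing helpers above: GENCAP's evl_support field encodes the entry size as a power-of-two multiple of 32 bytes, so encodings 1, 2 and 3 yield 64-, 128- and 256-byte entries and 0 means the device has no event log. A minimal user-space sketch of the same math (the upper bound of 3 is an assumption for the demo, not something the expression enforces):

#include <stdio.h>

/* Mirrors evl_ent_size(): 0 = unsupported, otherwise 32 << evl_support bytes. */
static unsigned int evl_entry_bytes(unsigned int evl_support)
{
	return evl_support ? 32u * (1u << evl_support) : 0;
}

int main(void)
{
	for (unsigned int enc = 0; enc <= 3; enc++)
		printf("evl_support=%u -> %u bytes per entry\n",
		       enc, evl_entry_bytes(enc));
	return 0;
}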
@@ -9,7 +9,6 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/aer.h>
#include <linux/fs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>

@@ -47,6 +46,9 @@ static struct idxd_driver_data idxd_driver_data[] = {
		.compl_size = sizeof(struct dsa_completion_record),
		.align = 32,
		.dev_type = &dsa_device_type,
		.evl_cr_off = offsetof(struct dsa_evl_entry, cr),
		.cr_status_off = offsetof(struct dsa_completion_record, status),
		.cr_result_off = offsetof(struct dsa_completion_record, result),
	},
	[IDXD_TYPE_IAX] = {
		.name_prefix = "iax",

@@ -54,6 +56,9 @@ static struct idxd_driver_data idxd_driver_data[] = {
		.compl_size = sizeof(struct iax_completion_record),
		.align = 64,
		.dev_type = &iax_device_type,
		.evl_cr_off = offsetof(struct iax_evl_entry, cr),
		.cr_status_off = offsetof(struct iax_completion_record, status),
		.cr_result_off = offsetof(struct iax_completion_record, error_code),
	},
};

@@ -200,6 +205,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
			}
			bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
		}
		mutex_init(&wq->uc_lock);
		xa_init(&wq->upasid_xa);
		idxd->wqs[i] = wq;
	}

@@ -332,6 +339,33 @@ static void idxd_cleanup_internals(struct idxd_device *idxd)
	destroy_workqueue(idxd->wq);
}

static int idxd_init_evl(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct idxd_evl *evl;

	if (idxd->hw.gen_cap.evl_support == 0)
		return 0;

	evl = kzalloc_node(sizeof(*evl), GFP_KERNEL, dev_to_node(dev));
	if (!evl)
		return -ENOMEM;

	spin_lock_init(&evl->lock);
	evl->size = IDXD_EVL_SIZE_MIN;

	idxd->evl_cache = kmem_cache_create(dev_name(idxd_confdev(idxd)),
					    sizeof(struct idxd_evl_fault) + evl_ent_size(idxd),
					    0, 0, NULL);
	if (!idxd->evl_cache) {
		kfree(evl);
		return -ENOMEM;
	}

	idxd->evl = evl;
	return 0;
}

static int idxd_setup_internals(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;

@@ -357,8 +391,14 @@ static int idxd_setup_internals(struct idxd_device *idxd)
		goto err_wkq_create;
	}

	rc = idxd_init_evl(idxd);
	if (rc < 0)
		goto err_evl;

	return 0;

 err_evl:
	destroy_workqueue(idxd->wq);
 err_wkq_create:
	for (i = 0; i < idxd->max_groups; i++)
		put_device(group_confdev(idxd->groups[i]));

@@ -389,7 +429,7 @@ static void idxd_read_table_offsets(struct idxd_device *idxd)
	dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}

static void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count)
void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count)
{
	int i, j, nr;

@@ -461,6 +501,10 @@ static void idxd_read_caps(struct idxd_device *idxd)
		dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
	}
	multi_u64_to_bmap(idxd->opcap_bmap, &idxd->hw.opcap.bits[0], 4);

	/* read iaa cap */
	if (idxd->data->type == IDXD_TYPE_IAX && idxd->hw.version >= DEVICE_VERSION_2)
		idxd->hw.iaa_cap.bits = ioread64(idxd->reg_base + IDXD_IAACAP_OFFSET);
}

static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)

@@ -661,6 +705,10 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
		goto err_dev_register;
	}

	rc = idxd_device_init_debugfs(idxd);
	if (rc)
		dev_warn(dev, "IDXD debugfs failed to setup\n");

	dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
		 idxd->hw.version);

@@ -723,6 +771,7 @@ static void idxd_remove(struct pci_dev *pdev)
	idxd_shutdown(pdev);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	idxd_device_remove_debugfs(idxd);

	irq_entry = idxd_get_ie(idxd, 0);
	free_irq(irq_entry->vector, irq_entry);

@@ -780,6 +829,10 @@ static int __init idxd_init_module(void)
	if (err)
		goto err_cdev_register;

	err = idxd_init_debugfs();
	if (err)
		goto err_debugfs;

	err = pci_register_driver(&idxd_pci_driver);
	if (err)
		goto err_pci_register;

@@ -787,6 +840,8 @@ static int __init idxd_init_module(void)
	return 0;

err_pci_register:
	idxd_remove_debugfs();
err_debugfs:
	idxd_cdev_remove();
err_cdev_register:
	idxd_driver_unregister(&idxd_user_drv);

@@ -807,5 +862,6 @@ static void __exit idxd_exit_module(void)
	pci_unregister_driver(&idxd_pci_driver);
	idxd_cdev_remove();
	perfmon_exit();
	idxd_remove_debugfs();
}
module_exit(idxd_exit_module);
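
The newly exported multi_u64_to_bmap() lets sysfs reuse the same word-array-to-bitmap conversion the capability parsing uses: it folds an array of u64 register words into one contiguous bitmap. A rough user-space equivalent, assuming 64-bit longs, would be:

#include <stdint.h>
#include <stdio.h>

/* Fold 'count' 64-bit words into a flat bitmap, lowest word first. */
static void words_to_bitmap(unsigned long *bmap, const uint64_t *val, int count)
{
	for (int i = 0; i < count; i++)
		for (int j = 0; j < 64; j++)
			if (val[i] & ((uint64_t)1 << j)) {
				int nr = i * 64 + j;

				bmap[nr / 64] |= 1ul << (nr % 64);
			}
}

int main(void)
{
	uint64_t opcap[4] = { 0x3, 0x0, 0x1, 0x0 };
	unsigned long bmap[4] = { 0 };

	words_to_bitmap(bmap, opcap, 4);
	printf("bits 0,1 set: %d, bit 128 set: %d\n",
	       !!(bmap[0] & 3), !!(bmap[2] & 1));
	return 0;
}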
@@ -7,6 +7,8 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/iommu.h>
#include <linux/sched/mm.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"

@@ -217,13 +219,187 @@ static void idxd_int_handle_revoke(struct work_struct *work)
	kfree(revoke);
}

static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
static void idxd_evl_fault_work(struct work_struct *work)
{
	struct idxd_evl_fault *fault = container_of(work, struct idxd_evl_fault, work);
	struct idxd_wq *wq = fault->wq;
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	struct idxd_evl *evl = idxd->evl;
	struct __evl_entry *entry_head = fault->entry;
	void *cr = (void *)entry_head + idxd->data->evl_cr_off;
	int cr_size = idxd->data->compl_size;
	u8 *status = (u8 *)cr + idxd->data->cr_status_off;
	u8 *result = (u8 *)cr + idxd->data->cr_result_off;
	int copied, copy_size;
	bool *bf;

	switch (fault->status) {
	case DSA_COMP_CRA_XLAT:
		if (entry_head->batch && entry_head->first_err_in_batch)
			evl->batch_fail[entry_head->batch_id] = false;

		copy_size = cr_size;
		idxd_user_counter_increment(wq, entry_head->pasid, COUNTER_FAULTS);
		break;
	case DSA_COMP_BATCH_EVL_ERR:
		bf = &evl->batch_fail[entry_head->batch_id];

		copy_size = entry_head->rcr || *bf ? cr_size : 0;
		if (*bf) {
			if (*status == DSA_COMP_SUCCESS)
				*status = DSA_COMP_BATCH_FAIL;
			*result = 1;
			*bf = false;
		}
		idxd_user_counter_increment(wq, entry_head->pasid, COUNTER_FAULTS);
		break;
	case DSA_COMP_DRAIN_EVL:
		copy_size = cr_size;
		break;
	default:
		copy_size = 0;
		dev_dbg_ratelimited(dev, "Unrecognized error code: %#x\n", fault->status);
		break;
	}

	if (copy_size == 0)
		return;

	/*
	 * Copy completion record to fault_addr in user address space
	 * that is found by wq and PASID.
	 */
	copied = idxd_copy_cr(wq, entry_head->pasid, entry_head->fault_addr,
			      cr, copy_size);
	/*
	 * The task that triggered the page fault is unknown at this point,
	 * because multiple threads may share the user address space, or the
	 * task may already have exited before the fault is handled here.
	 * So if the copy fails, a SIGSEGV cannot be sent to the task.
	 * Just print an error for the failure. The user application
	 * waiting for the completion record will time out on this
	 * failure.
	 */
	switch (fault->status) {
	case DSA_COMP_CRA_XLAT:
		if (copied != copy_size) {
			idxd_user_counter_increment(wq, entry_head->pasid, COUNTER_FAULT_FAILS);
			dev_dbg_ratelimited(dev, "Failed to write to completion record: (%d:%d)\n",
					    copy_size, copied);
			if (entry_head->batch)
				evl->batch_fail[entry_head->batch_id] = true;
		}
		break;
	case DSA_COMP_BATCH_EVL_ERR:
		if (copied != copy_size) {
			idxd_user_counter_increment(wq, entry_head->pasid, COUNTER_FAULT_FAILS);
			dev_dbg_ratelimited(dev, "Failed to write to batch completion record: (%d:%d)\n",
					    copy_size, copied);
		}
		break;
	case DSA_COMP_DRAIN_EVL:
		if (copied != copy_size)
			dev_dbg_ratelimited(dev, "Failed to write to drain completion record: (%d:%d)\n",
					    copy_size, copied);
		break;
	}

	kmem_cache_free(idxd->evl_cache, fault);
}

static void process_evl_entry(struct idxd_device *idxd,
			      struct __evl_entry *entry_head, unsigned int index)
{
	struct device *dev = &idxd->pdev->dev;
	struct idxd_evl *evl = idxd->evl;
	u8 status;

	if (test_bit(index, evl->bmap)) {
		clear_bit(index, evl->bmap);
	} else {
		status = DSA_COMP_STATUS(entry_head->error);

		if (status == DSA_COMP_CRA_XLAT || status == DSA_COMP_DRAIN_EVL ||
		    status == DSA_COMP_BATCH_EVL_ERR) {
			struct idxd_evl_fault *fault;
			int ent_size = evl_ent_size(idxd);

			if (entry_head->rci)
				dev_dbg(dev, "Completion Int Req set, ignoring!\n");

			if (!entry_head->rcr && status == DSA_COMP_DRAIN_EVL)
				return;

			fault = kmem_cache_alloc(idxd->evl_cache, GFP_ATOMIC);
			if (fault) {
				struct idxd_wq *wq = idxd->wqs[entry_head->wq_idx];

				fault->wq = wq;
				fault->status = status;
				memcpy(&fault->entry, entry_head, ent_size);
				INIT_WORK(&fault->work, idxd_evl_fault_work);
				queue_work(wq->wq, &fault->work);
			} else {
				dev_warn(dev, "Failed to service fault work.\n");
			}
		} else {
			dev_warn_ratelimited(dev, "Device error %#x operation: %#x fault addr: %#llx\n",
					     status, entry_head->operation,
					     entry_head->fault_addr);
		}
	}
}

static void process_evl_entries(struct idxd_device *idxd)
{
	union evl_status_reg evl_status;
	unsigned int h, t;
	struct idxd_evl *evl = idxd->evl;
	struct __evl_entry *entry_head;
	unsigned int ent_size = evl_ent_size(idxd);
	u32 size;

	evl_status.bits = 0;
	evl_status.int_pending = 1;

	spin_lock(&evl->lock);
	/* Clear interrupt pending bit */
	iowrite32(evl_status.bits_upper32,
		  idxd->reg_base + IDXD_EVLSTATUS_OFFSET + sizeof(u32));
	h = evl->head;
	evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
	t = evl_status.tail;
	size = idxd->evl->size;

	while (h != t) {
		entry_head = (struct __evl_entry *)(evl->log + (h * ent_size));
		process_evl_entry(idxd, entry_head, h);
		h = (h + 1) % size;
	}

	evl->head = h;
	evl_status.head = h;
	iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
	spin_unlock(&evl->lock);
}

irqreturn_t idxd_misc_thread(int vec, void *data)
{
	struct idxd_irq_entry *irq_entry = data;
	struct idxd_device *idxd = ie_to_idxd(irq_entry);
	struct device *dev = &idxd->pdev->dev;
	union gensts_reg gensts;
	u32 val = 0;
	int i;
	bool err = false;
	u32 cause;

	cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
	if (!cause)
		return IRQ_NONE;

	iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);

	if (cause & IDXD_INTC_HALT_STATE)
		goto halt;

@@ -295,13 +471,18 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
		perfmon_counter_overflow(idxd);
	}

	if (cause & IDXD_INTC_EVL) {
		val |= IDXD_INTC_EVL;
		process_evl_entries(idxd);
	}

	val ^= cause;
	if (val)
		dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
			      val);

	if (!err)
		return 0;
		goto out;

halt:
	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

@@ -324,33 +505,10 @@ halt:
			 "idxd halted, need %s.\n",
			 gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
			 "FLR" : "system reset");
			return -ENXIO;
		}
	}

	return 0;
}

irqreturn_t idxd_misc_thread(int vec, void *data)
{
	struct idxd_irq_entry *irq_entry = data;
	struct idxd_device *idxd = ie_to_idxd(irq_entry);
	int rc;
	u32 cause;

	cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
	if (cause)
		iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);

	while (cause) {
		rc = process_misc_interrupts(idxd, cause);
		if (rc < 0)
			break;
		cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
		if (cause)
			iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
	}

out:
	return IRQ_HANDLED;
}
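
process_evl_entries() above is a classic single-consumer ring walk: the driver caches its own head, reads the hardware tail from EVLSTATUS, consumes entries modulo the log size, then writes the new head back. Stripped of the register access, the loop is equivalent to this sketch (entry handling and the fixed size are placeholders):

#include <stdio.h>

#define EVL_SIZE 64	/* number of entries; the driver's hardware minimum */

struct evl_entry { unsigned char error; };

static void handle_entry(struct evl_entry *e, unsigned int idx)
{
	printf("entry %u, error %#x\n", idx, e->error);
}

/* Consume everything between the cached head and the hardware tail. */
static unsigned int drain_ring(struct evl_entry *log,
			       unsigned int head, unsigned int tail)
{
	while (head != tail) {
		handle_entry(&log[head], head);
		head = (head + 1) % EVL_SIZE;
	}
	return head;	/* caller writes this back as the new head */
}

int main(void)
{
	struct evl_entry log[EVL_SIZE] = { { 0 } };
	unsigned int head = 62, tail = 2;	/* wraps around the end */

	head = drain_ring(log, head, tail);
	printf("new head = %u\n", head);
	return 0;
}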
@@ -3,6 +3,8 @@
#ifndef _IDXD_REGISTERS_H_
#define _IDXD_REGISTERS_H_

#include <uapi/linux/idxd.h>

/* PCI Config */
#define PCI_DEVICE_ID_INTEL_DSA_SPR0	0x0b25
#define PCI_DEVICE_ID_INTEL_IAX_SPR0	0x0cfe

@@ -31,7 +33,9 @@ union gen_cap_reg {
		u64 rsvd:3;
		u64 dest_readback:1;
		u64 drain_readback:1;
		u64 rsvd2:6;
		u64 rsvd2:3;
		u64 evl_support:2;
		u64 batch_continuation:1;
		u64 max_xfer_shift:5;
		u64 max_batch_shift:4;
		u64 max_ims_mult:6;

@@ -55,7 +59,8 @@ union wq_cap_reg {
		u64 occupancy:1;
		u64 occupancy_int:1;
		u64 op_config:1;
		u64 rsvd3:9;
		u64 wq_prs_support:1;
		u64 rsvd4:8;
	};
	u64 bits;
} __packed;

@@ -117,7 +122,8 @@ union gencfg_reg {
		u32 rdbuf_limit:8;
		u32 rsvd:4;
		u32 user_int_en:1;
		u32 rsvd2:19;
		u32 evl_en:1;
		u32 rsvd2:18;
	};
	u32 bits;
} __packed;

@@ -127,7 +133,8 @@ union genctrl_reg {
	struct {
		u32 softerr_int_en:1;
		u32 halt_int_en:1;
		u32 rsvd:30;
		u32 evl_int_en:1;
		u32 rsvd:29;
	};
	u32 bits;
} __packed;

@@ -162,6 +169,7 @@ enum idxd_device_reset_type {
#define IDXD_INTC_OCCUPY		0x04
#define IDXD_INTC_PERFMON_OVFL		0x08
#define IDXD_INTC_HALT_STATE		0x10
#define IDXD_INTC_EVL			0x20
#define IDXD_INTC_INT_HANDLE_REVOKED	0x80000000

#define IDXD_CMD_OFFSET			0xa0

@@ -276,6 +284,45 @@ union sw_err_reg {
	u64 bits[4];
} __packed;

union iaa_cap_reg {
	struct {
		u64 dec_aecs_format_ver:1;
		u64 drop_init_bits:1;
		u64 chaining:1;
		u64 force_array_output_mod:1;
		u64 load_part_aecs:1;
		u64 comp_early_abort:1;
		u64 nested_comp:1;
		u64 diction_comp:1;
		u64 header_gen:1;
		u64 crypto_gcm:1;
		u64 crypto_cfb:1;
		u64 crypto_xts:1;
		u64 rsvd:52;
	};
	u64 bits;
} __packed;

#define IDXD_IAACAP_OFFSET	0x180

#define IDXD_EVLCFG_OFFSET	0xe0
union evlcfg_reg {
	struct {
		u64 pasid_en:1;
		u64 priv:1;
		u64 rsvd:10;
		u64 base_addr:52;

		u64 size:16;
		u64 pasid:20;
		u64 rsvd2:28;
	};
	u64 bits[2];
} __packed;

#define IDXD_EVL_SIZE_MIN	0x0040
#define IDXD_EVL_SIZE_MAX	0xffff

union msix_perm {
	struct {
		u32 rsvd:2;

@@ -325,7 +372,7 @@ union wqcfg {
		u32 mode:1;	/* shared or dedicated */
		u32 bof:1;	/* block on fault */
		u32 wq_ats_disable:1;
		u32 rsvd2:1;
		u32 wq_prs_disable:1;
		u32 priority:4;
		u32 pasid:20;
		u32 pasid_en:1;

@@ -513,4 +560,73 @@ union filter_cfg {
	u64 val;
} __packed;

#define IDXD_EVLSTATUS_OFFSET		0xf0

union evl_status_reg {
	struct {
		u32 head:16;
		u32 rsvd:16;
		u32 tail:16;
		u32 rsvd2:14;
		u32 int_pending:1;
		u32 rsvd3:1;
	};
	struct {
		u32 bits_lower32;
		u32 bits_upper32;
	};
	u64 bits;
} __packed;

#define IDXD_MAX_BATCH_IDENT	256

struct __evl_entry {
	u64 rsvd:2;
	u64 desc_valid:1;
	u64 wq_idx_valid:1;
	u64 batch:1;
	u64 fault_rw:1;
	u64 priv:1;
	u64 err_info_valid:1;
	u64 error:8;
	u64 wq_idx:8;
	u64 batch_id:8;
	u64 operation:8;
	u64 pasid:20;
	u64 rsvd2:4;

	u16 batch_idx;
	u16 rsvd3;
	union {
		/* Invalid Flags 0x11 */
		u32 invalid_flags;
		/* Invalid Int Handle 0x19 */
		/* Page fault 0x1a */
		/* Page fault 0x06, 0x1f, only operand_id */
		/* Page fault before drain or in batch, 0x26, 0x27 */
		struct {
			u16 int_handle;
			u16 rci:1;
			u16 ims:1;
			u16 rcr:1;
			u16 first_err_in_batch:1;
			u16 rsvd4_2:9;
			u16 operand_id:3;
		};
	};
	u64 fault_addr;
	u64 rsvd5;
} __packed;

struct dsa_evl_entry {
	struct __evl_entry e;
	struct dsa_completion_record cr;
} __packed;

struct iax_evl_entry {
	struct __evl_entry e;
	u64 rsvd[4];
	struct iax_completion_record cr;
} __packed;

#endif
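
Note how evl_status_reg overlays the 64-bit register with an anonymous struct of two u32 halves: head lives in the lower word, tail and int_pending in the upper one, which is what lets the interrupt path clear int_pending with one 32-bit write without racing the head update. A compact sketch of the same overlay idea (bitfield layout here assumes the usual little-endian ABI, as the kernel header does):

#include <stdint.h>
#include <stdio.h>

/* Same shape as evl_status_reg: bitfields and 32-bit halves over one u64. */
union evl_status {
	struct {
		uint32_t head:16;
		uint32_t rsvd:16;
		uint32_t tail:16;
		uint32_t rsvd2:14;
		uint32_t int_pending:1;
		uint32_t rsvd3:1;
	};
	struct {
		uint32_t lower32;
		uint32_t upper32;
	};
	uint64_t bits;
};

int main(void)
{
	union evl_status s = { .bits = 0 };

	s.head = 5;
	s.tail = 9;
	s.int_pending = 1;
	/* head only touches the low half; tail/int_pending the high half */
	printf("lower32=%#x upper32=%#x\n", s.lower32, s.upper32);
	return 0;
}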
@@ -822,10 +822,14 @@ static ssize_t wq_block_on_fault_store(struct device *dev,
	if (rc < 0)
		return rc;

	if (bof)
	if (bof) {
		if (test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags))
			return -EOPNOTSUPP;

		set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	else
	} else {
		clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	}

	return count;
}

@@ -1109,6 +1113,44 @@ static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute
static struct device_attribute dev_attr_wq_ats_disable =
		__ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);

static ssize_t wq_prs_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags));
}

static ssize_t wq_prs_disable_store(struct device *dev, struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	bool prs_dis;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (!idxd->hw.wq_cap.wq_prs_support)
		return -EOPNOTSUPP;

	rc = kstrtobool(buf, &prs_dis);
	if (rc < 0)
		return rc;

	if (prs_dis) {
		set_bit(WQ_FLAG_PRS_DISABLE, &wq->flags);
		/* when PRS is disabled, BOF needs to be off as well */
		clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	} else {
		clear_bit(WQ_FLAG_PRS_DISABLE, &wq->flags);
	}
	return count;
}

static struct device_attribute dev_attr_wq_prs_disable =
		__ATTR(prs_disable, 0644, wq_prs_disable_show, wq_prs_disable_store);

static ssize_t wq_occupancy_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

@@ -1239,6 +1281,7 @@ static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_max_transfer_size.attr,
	&dev_attr_wq_max_batch_size.attr,
	&dev_attr_wq_ats_disable.attr,
	&dev_attr_wq_prs_disable.attr,
	&dev_attr_wq_occupancy.attr,
	&dev_attr_wq_enqcmds_retries.attr,
	&dev_attr_wq_op_config.attr,

@@ -1260,6 +1303,13 @@ static bool idxd_wq_attr_max_batch_size_invisible(struct attribute *attr,
	       idxd->data->type == IDXD_TYPE_IAX;
}

static bool idxd_wq_attr_wq_prs_disable_invisible(struct attribute *attr,
						  struct idxd_device *idxd)
{
	return attr == &dev_attr_wq_prs_disable.attr &&
	       !idxd->hw.wq_cap.wq_prs_support;
}

static umode_t idxd_wq_attr_visible(struct kobject *kobj,
				    struct attribute *attr, int n)
{

@@ -1273,6 +1323,9 @@ static umode_t idxd_wq_attr_visible(struct kobject *kobj,
	if (idxd_wq_attr_max_batch_size_invisible(attr, idxd))
		return 0;

	if (idxd_wq_attr_wq_prs_disable_invisible(attr, idxd))
		return 0;

	return attr->mode;
}

@@ -1292,6 +1345,7 @@ static void idxd_conf_wq_release(struct device *dev)

	bitmap_free(wq->opcap_bmap);
	kfree(wq->wqcfg);
	xa_destroy(&wq->upasid_xa);
	kfree(wq);
}

@@ -1452,15 +1506,13 @@ static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	int i, out = 0;
	DECLARE_BITMAP(swerr_bmap, 256);

	bitmap_zero(swerr_bmap, 256);
	spin_lock(&idxd->dev_lock);
	for (i = 0; i < 4; i++)
		out += sysfs_emit_at(buf, out, "%#018llx ", idxd->sw_err.bits[i]);
	multi_u64_to_bmap(swerr_bmap, &idxd->sw_err.bits[0], 4);
	spin_unlock(&idxd->dev_lock);
	out--;
	out += sysfs_emit_at(buf, out, "\n");
	return out;
	return sysfs_emit(buf, "%*pb\n", 256, swerr_bmap);
}
static DEVICE_ATTR_RO(errors);

@@ -1563,6 +1615,59 @@ static ssize_t cmd_status_store(struct device *dev, struct device_attribute *att
}
static DEVICE_ATTR_RW(cmd_status);

static ssize_t iaa_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	if (idxd->hw.version < DEVICE_VERSION_2)
		return -EOPNOTSUPP;

	return sysfs_emit(buf, "%#llx\n", idxd->hw.iaa_cap.bits);
}
static DEVICE_ATTR_RO(iaa_cap);

static ssize_t event_log_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	if (!idxd->evl)
		return -EOPNOTSUPP;

	return sysfs_emit(buf, "%u\n", idxd->evl->size);
}

static ssize_t event_log_size_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	unsigned long val;
	int rc;

	if (!idxd->evl)
		return -EOPNOTSUPP;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (val < IDXD_EVL_SIZE_MIN || val > IDXD_EVL_SIZE_MAX ||
	    (val * evl_ent_size(idxd) > ULONG_MAX - idxd->evl->dma))
		return -EINVAL;

	idxd->evl->size = val;
	return count;
}
static DEVICE_ATTR_RW(event_log_size);

static bool idxd_device_attr_max_batch_size_invisible(struct attribute *attr,
						      struct idxd_device *idxd)
{

@@ -1585,6 +1690,21 @@ static bool idxd_device_attr_read_buffers_invisible(struct attribute *attr,
	       idxd->data->type == IDXD_TYPE_IAX;
}

static bool idxd_device_attr_iaa_cap_invisible(struct attribute *attr,
					       struct idxd_device *idxd)
{
	return attr == &dev_attr_iaa_cap.attr &&
	       (idxd->data->type != IDXD_TYPE_IAX ||
	       idxd->hw.version < DEVICE_VERSION_2);
}

static bool idxd_device_attr_event_log_size_invisible(struct attribute *attr,
						      struct idxd_device *idxd)
{
	return (attr == &dev_attr_event_log_size.attr &&
		!idxd->hw.gen_cap.evl_support);
}

static umode_t idxd_device_attr_visible(struct kobject *kobj,
					struct attribute *attr, int n)
{

@@ -1597,6 +1717,12 @@ static umode_t idxd_device_attr_visible(struct kobject *kobj,
	if (idxd_device_attr_read_buffers_invisible(attr, idxd))
		return 0;

	if (idxd_device_attr_iaa_cap_invisible(attr, idxd))
		return 0;

	if (idxd_device_attr_event_log_size_invisible(attr, idxd))
		return 0;

	return attr->mode;
}

@@ -1622,6 +1748,8 @@ static struct attribute *idxd_device_attributes[] = {
	&dev_attr_read_buffer_limit.attr,
	&dev_attr_cdev_major.attr,
	&dev_attr_cmd_status.attr,
	&dev_attr_iaa_cap.attr,
	&dev_attr_event_log_size.attr,
	NULL,
};

@@ -1643,6 +1771,8 @@ static void idxd_conf_device_release(struct device *dev)
	bitmap_free(idxd->wq_enable_map);
	kfree(idxd->wqs);
	kfree(idxd->engines);
	kfree(idxd->evl);
	kmem_cache_destroy(idxd->evl_cache);
	ida_free(&idxd_ida, idxd->id);
	bitmap_free(idxd->opcap_bmap);
	kfree(idxd);
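
The prs_disable, iaa_cap and event_log_size attributes above all follow the same sysfs idiom: register the attribute unconditionally, then hide it from the group's .is_visible callback when the hardware lacks the capability. A generic sketch of that pattern (the names and the capability check are illustrative, not the driver's):

#include <linux/device.h>
#include <linux/sysfs.h>

static bool my_dev_has_feature(struct device *dev)
{
	return false;	/* stand-in for a real capability check */
}

static ssize_t feature_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", 1);
}
static DEVICE_ATTR_RO(feature);

static struct attribute *my_attrs[] = {
	&dev_attr_feature.attr,
	NULL,
};

static umode_t my_attr_visible(struct kobject *kobj,
			       struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);

	if (attr == &dev_attr_feature.attr && !my_dev_has_feature(dev))
		return 0;	/* attribute silently disappears from sysfs */
	return attr->mode;
}

static const struct attribute_group my_group = {
	.attrs = my_attrs,
	.is_visible = my_attr_visible,
};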
@@ -750,7 +750,6 @@ static int imxdma_alloc_chan_resources(struct dma_chan *chan)
		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		memset(&desc->desc, 0, sizeof(struct dma_async_tx_descriptor));
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
@@ -15,7 +15,6 @@
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dca.h>
#include <linux/aer.h>
#include <linux/sizes.h>
#include "dma.h"
#include "registers.h"

@@ -1191,13 +1190,13 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
		ioat_dma->dca = ioat_dca_init(pdev, ioat_dma->reg_base);

	/* disable relaxed ordering */
	err = pcie_capability_read_word(pdev, IOAT_DEVCTRL_OFFSET, &val16);
	err = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &val16);
	if (err)
		return pcibios_err_to_errno(err);

	/* clear relaxed ordering enable */
	val16 &= ~IOAT_DEVCTRL_ROE;
	err = pcie_capability_write_word(pdev, IOAT_DEVCTRL_OFFSET, val16);
	val16 &= ~PCI_EXP_DEVCTL_RELAX_EN;
	err = pcie_capability_write_word(pdev, PCI_EXP_DEVCTL, val16);
	if (err)
		return pcibios_err_to_errno(err);

@@ -1380,15 +1379,11 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
		if (is_skx_ioat(pdev))
			device->version = IOAT_VER_3_2;
		err = ioat3_dma_probe(device, ioat_dca_enabled);

		if (device->version >= IOAT_VER_3_3)
			pci_enable_pcie_error_reporting(pdev);
	} else
		return -ENODEV;

	if (err) {
		dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
		pci_disable_pcie_error_reporting(pdev);
		return -ENODEV;
	}

@@ -1411,7 +1406,6 @@ static void ioat_remove(struct pci_dev *pdev)
		device->dca = NULL;
	}

	pci_disable_pcie_error_reporting(pdev);
	ioat_dma_remove(device);
}
@@ -14,13 +14,6 @@
#define IOAT_PCI_CHANERR_INT_OFFSET		0x180
#define IOAT_PCI_CHANERRMASK_INT_OFFSET		0x184

/* PCIe config registers */

/* EXPCAPID + N */
#define IOAT_DEVCTRL_OFFSET			0x8
/* relaxed ordering enable */
#define IOAT_DEVCTRL_ROE			0x10

/* MMIO Device Registers */
#define IOAT_CHANCNT_OFFSET			0x00	/*  8-bit */
@@ -739,32 +739,18 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
	if (ret)
		return ret;

	xor_dev->reg_clk = devm_clk_get(&pdev->dev, "reg");
	if (PTR_ERR(xor_dev->reg_clk) != -ENOENT) {
		if (!IS_ERR(xor_dev->reg_clk)) {
			ret = clk_prepare_enable(xor_dev->reg_clk);
			if (ret)
				return ret;
		} else {
			return PTR_ERR(xor_dev->reg_clk);
		}
	}
	xor_dev->reg_clk = devm_clk_get_optional_enabled(&pdev->dev, "reg");
	if (IS_ERR(xor_dev->reg_clk))
		return PTR_ERR(xor_dev->reg_clk);

	xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
	if (PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
		ret = EPROBE_DEFER;
		goto disable_reg_clk;
	}
	if (!IS_ERR(xor_dev->clk)) {
		ret = clk_prepare_enable(xor_dev->clk);
		if (ret)
			goto disable_reg_clk;
	}
	xor_dev->clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(xor_dev->clk))
		return PTR_ERR(xor_dev->clk);

	ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1,
					     mv_xor_v2_set_msi_msg);
	if (ret)
		goto disable_clk;
		return ret;

	xor_dev->irq = msi_get_virq(&pdev->dev, 0);

@@ -866,10 +852,6 @@ free_hw_desq:
			  xor_dev->hw_desq_virt, xor_dev->hw_desq);
free_msi_irqs:
	platform_msi_domain_free_irqs(&pdev->dev);
disable_clk:
	clk_disable_unprepare(xor_dev->clk);
disable_reg_clk:
	clk_disable_unprepare(xor_dev->reg_clk);
	return ret;
}

@@ -889,9 +871,6 @@ static int mv_xor_v2_remove(struct platform_device *pdev)

	tasklet_kill(&xor_dev->irq_tasklet);

	clk_disable_unprepare(xor_dev->clk);
	clk_disable_unprepare(xor_dev->reg_clk);

	return 0;
}
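
The mv_xor_v2 conversion above is the standard devm clock simplification: devm_clk_get_optional_enabled() returns NULL (not an error) when the clock is absent, and both helpers undo prepare/enable automatically on driver detach, which is why the disable_clk/disable_reg_clk unwind labels and the remove-path clk_disable_unprepare() calls could be deleted. The resulting probe shape, reduced to its essentials as a sketch:

#include <linux/clk.h>
#include <linux/platform_device.h>

/* Sketch of the simplified probe: no clk unwind labels needed. */
static int example_probe(struct platform_device *pdev)
{
	struct clk *reg_clk, *core_clk;

	/* optional: a missing "reg" clock yields NULL, which clk_*() accept */
	reg_clk = devm_clk_get_optional_enabled(&pdev->dev, "reg");
	if (IS_ERR(reg_clk))
		return PTR_ERR(reg_clk);

	/* mandatory core clock, also auto-disabled on detach */
	core_clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(core_clk))
		return PTR_ERR(core_clk);

	return 0;
}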
@@ -264,7 +264,7 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
	}

	/* Silently fail if there is not even the "dmas" property */
	if (!of_find_property(np, "dmas", NULL))
	if (!of_property_present(np, "dmas"))
		return ERR_PTR(-ENODEV);

	count = of_property_count_strings(np, "dma-names");
@@ -1966,7 +1966,6 @@
error_alloc_ev_ring:
error_config_int:
	gpi_free_ring(&gpii->ev_ring, gpii);
exit_gpi_init:
	mutex_unlock(&gpii->ctrl_lock);
	return ret;
}
@@ -12,6 +12,8 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
@@ -20,6 +20,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

@@ -66,8 +67,6 @@ struct rz_dmac_chan {
	struct rz_dmac_desc *desc;
	int descs_allocated;

	enum dma_slave_buswidth src_word_size;
	enum dma_slave_buswidth dst_word_size;
	dma_addr_t src_per_address;
	dma_addr_t dst_per_address;

@@ -92,6 +91,7 @@ struct rz_dmac_chan {
struct rz_dmac {
	struct dma_device engine;
	struct device *dev;
	struct reset_control *rstc;
	void __iomem *base;
	void __iomem *ext_base;

@@ -601,9 +601,7 @@ static int rz_dmac_config(struct dma_chan *chan,
	u32 val;

	channel->src_per_address = config->src_addr;
	channel->src_word_size = config->src_addr_width;
	channel->dst_per_address = config->dst_addr;
	channel->dst_word_size = config->dst_addr_width;

	val = rz_dmac_ds_to_val_mapping(config->dst_addr_width);
	if (val == CHCFG_DS_INVALID)

@@ -889,6 +887,11 @@ static int rz_dmac_probe(struct platform_device *pdev)
	/* Initialize the channels. */
	INIT_LIST_HEAD(&dmac->engine.channels);

	dmac->rstc = devm_reset_control_array_get_exclusive(&pdev->dev);
	if (IS_ERR(dmac->rstc))
		return dev_err_probe(&pdev->dev, PTR_ERR(dmac->rstc),
				     "failed to get resets\n");

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {

@@ -896,6 +899,10 @@ static int rz_dmac_probe(struct platform_device *pdev)
		goto err_pm_disable;
	}

	ret = reset_control_deassert(dmac->rstc);
	if (ret)
		goto err_pm_runtime_put;

	for (i = 0; i < dmac->n_channels; i++) {
		ret = rz_dmac_chan_probe(dmac, &dmac->channels[i], i);
		if (ret < 0)

@@ -940,6 +947,7 @@ static int rz_dmac_probe(struct platform_device *pdev)
dma_register_err:
	of_dma_controller_free(pdev->dev.of_node);
err:
	reset_control_assert(dmac->rstc);
	channel_num = i ? i - 1 : 0;
	for (i = 0; i < channel_num; i++) {
		struct rz_dmac_chan *channel = &dmac->channels[i];

@@ -950,6 +958,7 @@ err:
				  channel->lmdesc.base_dma);
	}

err_pm_runtime_put:
	pm_runtime_put(&pdev->dev);
err_pm_disable:
	pm_runtime_disable(&pdev->dev);

@@ -972,6 +981,7 @@ static int rz_dmac_remove(struct platform_device *pdev)
	}
	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&dmac->engine);
	reset_control_assert(dmac->rstc);
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
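
The rz-dmac change wires a reset controller into the probe/remove life cycle; the detail worth noting is the ordering: deassert only after runtime PM has powered the device, and assert the reset again before runtime PM is dropped on the way out. A skeletal probe/remove pair showing that ordering (names are illustrative; error unwinding trimmed to the reset/PM steps):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>

struct my_dmac { struct reset_control *rstc; };

static int my_probe(struct platform_device *pdev)
{
	struct my_dmac *d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL);
	int ret;

	if (!d)
		return -ENOMEM;

	d->rstc = devm_reset_control_array_get_exclusive(&pdev->dev);
	if (IS_ERR(d->rstc))
		return dev_err_probe(&pdev->dev, PTR_ERR(d->rstc),
				     "failed to get resets\n");

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0)
		goto err_pm_disable;

	ret = reset_control_deassert(d->rstc);	/* device is powered now */
	if (ret)
		goto err_pm_put;

	platform_set_drvdata(pdev, d);
	return 0;

err_pm_put:
	pm_runtime_put(&pdev->dev);
err_pm_disable:
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int my_remove(struct platform_device *pdev)
{
	struct my_dmac *d = platform_get_drvdata(pdev);

	reset_control_assert(d->rstc);	/* before dropping power */
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}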
@@ -233,11 +233,6 @@ static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val)
	writel(val, tdma->base_addr + reg);
}

static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
{
	return readl(tdma->base_addr + reg);
}

static inline void tdc_write(struct tegra_dma_channel *tdc,
			     u32 reg, u32 val)
{
@@ -11,6 +11,7 @@ k3-psil-lib-objs := k3-psil.o \
		    k3-psil-am64.o \
		    k3-psil-j721s2.o \
		    k3-psil-am62.o \
		    k3-psil-am62a.o
		    k3-psil-am62a.o \
		    k3-psil-j784s4.o
obj-$(CONFIG_TI_K3_PSIL) += k3-psil-lib.o
obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o
@@ -318,14 +318,6 @@ static inline void edma_modify(struct edma_cc *ecc, int offset, unsigned and,
	edma_write(ecc, offset, val);
}

static inline void edma_and(struct edma_cc *ecc, int offset, unsigned and)
{
	unsigned val = edma_read(ecc, offset);

	val &= and;
	edma_write(ecc, offset, val);
}

static inline void edma_or(struct edma_cc *ecc, int offset, unsigned or)
{
	unsigned val = edma_read(ecc, offset);
@@ -0,0 +1,354 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com
 */

#include <linux/kernel.h>

#include "k3-psil-priv.h"

#define PSIL_PDMA_XY_TR(x)				\
	{						\
		.thread_id = x,				\
		.ep_config = {				\
			.ep_type = PSIL_EP_PDMA_XY,	\
		},					\
	}

#define PSIL_PDMA_XY_PKT(x)				\
	{						\
		.thread_id = x,				\
		.ep_config = {				\
			.ep_type = PSIL_EP_PDMA_XY,	\
			.pkt_mode = 1,			\
		},					\
	}

#define PSIL_PDMA_MCASP(x)				\
	{						\
		.thread_id = x,				\
		.ep_config = {				\
			.ep_type = PSIL_EP_PDMA_XY,	\
			.pdma_acc32 = 1,		\
			.pdma_burst = 1,		\
		},					\
	}

#define PSIL_ETHERNET(x)				\
	{						\
		.thread_id = x,				\
		.ep_config = {				\
			.ep_type = PSIL_EP_NATIVE,	\
			.pkt_mode = 1,			\
			.needs_epib = 1,		\
			.psd_size = 16,			\
		},					\
	}

#define PSIL_SA2UL(x, tx)				\
	{						\
		.thread_id = x,				\
		.ep_config = {				\
			.ep_type = PSIL_EP_NATIVE,	\
			.pkt_mode = 1,			\
			.needs_epib = 1,		\
			.psd_size = 64,			\
			.notdpkt = tx,			\
		},					\
	}

#define PSIL_CSI2RX(x)					\
	{						\
		.thread_id = x,				\
		.ep_config = {				\
			.ep_type = PSIL_EP_NATIVE,	\
		},					\
	}

/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
static struct psil_ep j784s4_src_ep_map[] = {
	/* PDMA_MCASP - McASP0-4 */
	PSIL_PDMA_MCASP(0x4400),
	PSIL_PDMA_MCASP(0x4401),
	PSIL_PDMA_MCASP(0x4402),
	PSIL_PDMA_MCASP(0x4403),
	PSIL_PDMA_MCASP(0x4404),
	/* PDMA_SPI_G0 - SPI0-3 */
	PSIL_PDMA_XY_PKT(0x4600),
	PSIL_PDMA_XY_PKT(0x4601),
	PSIL_PDMA_XY_PKT(0x4602),
	PSIL_PDMA_XY_PKT(0x4603),
	PSIL_PDMA_XY_PKT(0x4604),
	PSIL_PDMA_XY_PKT(0x4605),
	PSIL_PDMA_XY_PKT(0x4606),
	PSIL_PDMA_XY_PKT(0x4607),
	PSIL_PDMA_XY_PKT(0x4608),
	PSIL_PDMA_XY_PKT(0x4609),
	PSIL_PDMA_XY_PKT(0x460a),
	PSIL_PDMA_XY_PKT(0x460b),
	PSIL_PDMA_XY_PKT(0x460c),
	PSIL_PDMA_XY_PKT(0x460d),
	PSIL_PDMA_XY_PKT(0x460e),
	PSIL_PDMA_XY_PKT(0x460f),
	/* PDMA_SPI_G1 - SPI4-7 */
	PSIL_PDMA_XY_PKT(0x4620),
	PSIL_PDMA_XY_PKT(0x4621),
	PSIL_PDMA_XY_PKT(0x4622),
	PSIL_PDMA_XY_PKT(0x4623),
	PSIL_PDMA_XY_PKT(0x4624),
	PSIL_PDMA_XY_PKT(0x4625),
	PSIL_PDMA_XY_PKT(0x4626),
	PSIL_PDMA_XY_PKT(0x4627),
	PSIL_PDMA_XY_PKT(0x4628),
	PSIL_PDMA_XY_PKT(0x4629),
	PSIL_PDMA_XY_PKT(0x462a),
	PSIL_PDMA_XY_PKT(0x462b),
	PSIL_PDMA_XY_PKT(0x462c),
	PSIL_PDMA_XY_PKT(0x462d),
	PSIL_PDMA_XY_PKT(0x462e),
	PSIL_PDMA_XY_PKT(0x462f),
	/* MAIN_CPSW2G */
	PSIL_ETHERNET(0x4640),
	/* PDMA_USART_G0 - UART0-1 */
	PSIL_PDMA_XY_PKT(0x4700),
	PSIL_PDMA_XY_PKT(0x4701),
	/* PDMA_USART_G1 - UART2-3 */
	PSIL_PDMA_XY_PKT(0x4702),
	PSIL_PDMA_XY_PKT(0x4703),
	/* PDMA_USART_G2 - UART4-9 */
	PSIL_PDMA_XY_PKT(0x4704),
	PSIL_PDMA_XY_PKT(0x4705),
	PSIL_PDMA_XY_PKT(0x4706),
	PSIL_PDMA_XY_PKT(0x4707),
	PSIL_PDMA_XY_PKT(0x4708),
	PSIL_PDMA_XY_PKT(0x4709),
	/* CSI2RX */
	PSIL_CSI2RX(0x4900),
	PSIL_CSI2RX(0x4901),
	PSIL_CSI2RX(0x4902),
	PSIL_CSI2RX(0x4903),
	PSIL_CSI2RX(0x4940),
	PSIL_CSI2RX(0x4941),
	PSIL_CSI2RX(0x4942),
	PSIL_CSI2RX(0x4943),
	PSIL_CSI2RX(0x4944),
	PSIL_CSI2RX(0x4945),
	PSIL_CSI2RX(0x4946),
	PSIL_CSI2RX(0x4947),
	PSIL_CSI2RX(0x4948),
	PSIL_CSI2RX(0x4949),
	PSIL_CSI2RX(0x494a),
	PSIL_CSI2RX(0x494b),
	PSIL_CSI2RX(0x494c),
	PSIL_CSI2RX(0x494d),
	PSIL_CSI2RX(0x494e),
	PSIL_CSI2RX(0x494f),
	PSIL_CSI2RX(0x4950),
	PSIL_CSI2RX(0x4951),
	PSIL_CSI2RX(0x4952),
	PSIL_CSI2RX(0x4953),
	PSIL_CSI2RX(0x4954),
	PSIL_CSI2RX(0x4955),
	PSIL_CSI2RX(0x4956),
	PSIL_CSI2RX(0x4957),
	PSIL_CSI2RX(0x4958),
	PSIL_CSI2RX(0x4959),
	PSIL_CSI2RX(0x495a),
	PSIL_CSI2RX(0x495b),
	PSIL_CSI2RX(0x495c),
	PSIL_CSI2RX(0x495d),
	PSIL_CSI2RX(0x495e),
	PSIL_CSI2RX(0x495f),
	PSIL_CSI2RX(0x4960),
	PSIL_CSI2RX(0x4961),
	PSIL_CSI2RX(0x4962),
	PSIL_CSI2RX(0x4963),
	PSIL_CSI2RX(0x4964),
	PSIL_CSI2RX(0x4965),
	PSIL_CSI2RX(0x4966),
	PSIL_CSI2RX(0x4967),
	PSIL_CSI2RX(0x4968),
	PSIL_CSI2RX(0x4969),
	PSIL_CSI2RX(0x496a),
	PSIL_CSI2RX(0x496b),
	PSIL_CSI2RX(0x496c),
	PSIL_CSI2RX(0x496d),
	PSIL_CSI2RX(0x496e),
	PSIL_CSI2RX(0x496f),
	PSIL_CSI2RX(0x4970),
	PSIL_CSI2RX(0x4971),
	PSIL_CSI2RX(0x4972),
	PSIL_CSI2RX(0x4973),
	PSIL_CSI2RX(0x4974),
	PSIL_CSI2RX(0x4975),
	PSIL_CSI2RX(0x4976),
	PSIL_CSI2RX(0x4977),
	PSIL_CSI2RX(0x4978),
	PSIL_CSI2RX(0x4979),
	PSIL_CSI2RX(0x497a),
	PSIL_CSI2RX(0x497b),
	PSIL_CSI2RX(0x497c),
	PSIL_CSI2RX(0x497d),
	PSIL_CSI2RX(0x497e),
	PSIL_CSI2RX(0x497f),
	PSIL_CSI2RX(0x4980),
	PSIL_CSI2RX(0x4981),
	PSIL_CSI2RX(0x4982),
	PSIL_CSI2RX(0x4983),
	PSIL_CSI2RX(0x4984),
	PSIL_CSI2RX(0x4985),
	PSIL_CSI2RX(0x4986),
	PSIL_CSI2RX(0x4987),
	PSIL_CSI2RX(0x4988),
	PSIL_CSI2RX(0x4989),
	PSIL_CSI2RX(0x498a),
	PSIL_CSI2RX(0x498b),
	PSIL_CSI2RX(0x498c),
	PSIL_CSI2RX(0x498d),
	PSIL_CSI2RX(0x498e),
	PSIL_CSI2RX(0x498f),
	PSIL_CSI2RX(0x4990),
	PSIL_CSI2RX(0x4991),
	PSIL_CSI2RX(0x4992),
	PSIL_CSI2RX(0x4993),
	PSIL_CSI2RX(0x4994),
	PSIL_CSI2RX(0x4995),
	PSIL_CSI2RX(0x4996),
	PSIL_CSI2RX(0x4997),
	PSIL_CSI2RX(0x4998),
	PSIL_CSI2RX(0x4999),
	PSIL_CSI2RX(0x499a),
	PSIL_CSI2RX(0x499b),
	PSIL_CSI2RX(0x499c),
	PSIL_CSI2RX(0x499d),
	PSIL_CSI2RX(0x499e),
	PSIL_CSI2RX(0x499f),
	/* MAIN_CPSW9G */
	PSIL_ETHERNET(0x4a00),
	/* MAIN-SA2UL */
	PSIL_SA2UL(0x4a40, 0),
	PSIL_SA2UL(0x4a41, 0),
	PSIL_SA2UL(0x4a42, 0),
	PSIL_SA2UL(0x4a43, 0),
	/* MCU_CPSW0 */
	PSIL_ETHERNET(0x7000),
	/* MCU_PDMA0 (MCU_PDMA_MISC_G0) - SPI0 */
	PSIL_PDMA_XY_PKT(0x7100),
	PSIL_PDMA_XY_PKT(0x7101),
	PSIL_PDMA_XY_PKT(0x7102),
	PSIL_PDMA_XY_PKT(0x7103),
	/* MCU_PDMA1 (MCU_PDMA_MISC_G1) - SPI1-2 */
	PSIL_PDMA_XY_PKT(0x7200),
	PSIL_PDMA_XY_PKT(0x7201),
	PSIL_PDMA_XY_PKT(0x7202),
	PSIL_PDMA_XY_PKT(0x7203),
	PSIL_PDMA_XY_PKT(0x7204),
	PSIL_PDMA_XY_PKT(0x7205),
	PSIL_PDMA_XY_PKT(0x7206),
	PSIL_PDMA_XY_PKT(0x7207),
	/* MCU_PDMA2 (MCU_PDMA_MISC_G2) - UART0 */
	PSIL_PDMA_XY_PKT(0x7300),
	/* MCU_PDMA_ADC - ADC0-1 */
	PSIL_PDMA_XY_TR(0x7400),
	PSIL_PDMA_XY_TR(0x7401),
	PSIL_PDMA_XY_TR(0x7402),
	PSIL_PDMA_XY_TR(0x7403),
	/* MCU_SA2UL */
	PSIL_SA2UL(0x7500, 0),
	PSIL_SA2UL(0x7501, 0),
	PSIL_SA2UL(0x7502, 0),
	PSIL_SA2UL(0x7503, 0),
};

/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
static struct psil_ep j784s4_dst_ep_map[] = {
	/* MAIN_CPSW2G */
	PSIL_ETHERNET(0xc640),
	PSIL_ETHERNET(0xc641),
	PSIL_ETHERNET(0xc642),
	PSIL_ETHERNET(0xc643),
	PSIL_ETHERNET(0xc644),
	PSIL_ETHERNET(0xc645),
	PSIL_ETHERNET(0xc646),
	PSIL_ETHERNET(0xc647),
	/* MAIN_CPSW9G */
	PSIL_ETHERNET(0xca00),
	PSIL_ETHERNET(0xca01),
	PSIL_ETHERNET(0xca02),
	PSIL_ETHERNET(0xca03),
	PSIL_ETHERNET(0xca04),
	PSIL_ETHERNET(0xca05),
	PSIL_ETHERNET(0xca06),
	PSIL_ETHERNET(0xca07),
	/* MAIN-SA2UL */
	PSIL_SA2UL(0xca40, 1),
	PSIL_SA2UL(0xca41, 1),
	/* PDMA_SPI_G0 - SPI0-3 */
	PSIL_PDMA_XY_PKT(0xc600),
	PSIL_PDMA_XY_PKT(0xc601),
	PSIL_PDMA_XY_PKT(0xc602),
	PSIL_PDMA_XY_PKT(0xc603),
	PSIL_PDMA_XY_PKT(0xc604),
	PSIL_PDMA_XY_PKT(0xc605),
	PSIL_PDMA_XY_PKT(0xc606),
	PSIL_PDMA_XY_PKT(0xc607),
	PSIL_PDMA_XY_PKT(0xc608),
	PSIL_PDMA_XY_PKT(0xc609),
	PSIL_PDMA_XY_PKT(0xc60a),
	PSIL_PDMA_XY_PKT(0xc60b),
	PSIL_PDMA_XY_PKT(0xc60c),
	PSIL_PDMA_XY_PKT(0xc60d),
	PSIL_PDMA_XY_PKT(0xc60e),
	PSIL_PDMA_XY_PKT(0xc60f),
	/* PDMA_SPI_G1 - SPI4-7 */
	PSIL_PDMA_XY_PKT(0xc620),
	PSIL_PDMA_XY_PKT(0xc621),
	PSIL_PDMA_XY_PKT(0xc622),
	PSIL_PDMA_XY_PKT(0xc623),
	PSIL_PDMA_XY_PKT(0xc624),
	PSIL_PDMA_XY_PKT(0xc625),
	PSIL_PDMA_XY_PKT(0xc626),
	PSIL_PDMA_XY_PKT(0xc627),
	PSIL_PDMA_XY_PKT(0xc628),
	PSIL_PDMA_XY_PKT(0xc629),
	PSIL_PDMA_XY_PKT(0xc62a),
	PSIL_PDMA_XY_PKT(0xc62b),
	PSIL_PDMA_XY_PKT(0xc62c),
	PSIL_PDMA_XY_PKT(0xc62d),
	PSIL_PDMA_XY_PKT(0xc62e),
	PSIL_PDMA_XY_PKT(0xc62f),
	/* MCU_CPSW0 */
	PSIL_ETHERNET(0xf000),
	PSIL_ETHERNET(0xf001),
	PSIL_ETHERNET(0xf002),
	PSIL_ETHERNET(0xf003),
	PSIL_ETHERNET(0xf004),
	PSIL_ETHERNET(0xf005),
	PSIL_ETHERNET(0xf006),
	PSIL_ETHERNET(0xf007),
	/* MCU_PDMA_MISC_G0 - SPI0 */
	PSIL_PDMA_XY_PKT(0xf100),
	PSIL_PDMA_XY_PKT(0xf101),
	PSIL_PDMA_XY_PKT(0xf102),
	PSIL_PDMA_XY_PKT(0xf103),
	/* MCU_PDMA_MISC_G1 - SPI1-2 */
	PSIL_PDMA_XY_PKT(0xf200),
	PSIL_PDMA_XY_PKT(0xf201),
	PSIL_PDMA_XY_PKT(0xf202),
	PSIL_PDMA_XY_PKT(0xf203),
	PSIL_PDMA_XY_PKT(0xf204),
	PSIL_PDMA_XY_PKT(0xf205),
	PSIL_PDMA_XY_PKT(0xf206),
	PSIL_PDMA_XY_PKT(0xf207),
	/* MCU_SA2UL */
	PSIL_SA2UL(0xf500, 1),
	PSIL_SA2UL(0xf501, 1),
};

struct psil_ep_map j784s4_ep_map = {
	.name = "j784s4",
	.src = j784s4_src_ep_map,
	.src_count = ARRAY_SIZE(j784s4_src_ep_map),
	.dst = j784s4_dst_ep_map,
	.dst_count = ARRAY_SIZE(j784s4_dst_ep_map),
};
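
A convention worth knowing when reading these tables: on K3 SoCs a PSI-L destination (TX) thread ID is the corresponding source ID with bit 15 set, which is why MAIN_CPSW2G shows up as 0x4640 in the source map and 0xc640 in the destination map, and MCU_CPSW0 as 0x7000/0xf000. A tiny check of that relationship (the bit-15 rule is stated here as background on the PSI-L numbering, not something this file itself encodes):

#include <stdio.h>

#define PSIL_DEST_THREAD_ID_OFFSET 0x8000	/* bit 15 marks a destination */

int main(void)
{
	unsigned int src[] = { 0x4640, 0x4a00, 0x7000 };
	unsigned int dst[] = { 0xc640, 0xca00, 0xf000 };

	for (int i = 0; i < 3; i++)
		printf("%#x -> %#x (%s)\n", src[i],
		       src[i] | PSIL_DEST_THREAD_ID_OFFSET,
		       (src[i] | PSIL_DEST_THREAD_ID_OFFSET) == dst[i] ?
		       "matches dst map" : "mismatch");
	return 0;
}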
@@ -44,5 +44,6 @@ extern struct psil_ep_map am64_ep_map;
extern struct psil_ep_map j721s2_ep_map;
extern struct psil_ep_map am62_ep_map;
extern struct psil_ep_map am62a_ep_map;
extern struct psil_ep_map j784s4_ep_map;

#endif /* K3_PSIL_PRIV_H_ */
@@ -25,6 +25,7 @@ static const struct soc_device_attribute k3_soc_devices[] = {
	{ .family = "J721S2", .data = &j721s2_ep_map },
	{ .family = "AM62X", .data = &am62_ep_map },
	{ .family = "AM62AX", .data = &am62a_ep_map },
	{ .family = "J784S4", .data = &j784s4_ep_map },
	{ /* sentinel */ }
};
@@ -305,6 +305,8 @@ struct udma_chan {

	/* Channel configuration parameters */
	struct udma_chan_config config;
	/* Channel configuration parameters (backup) */
	struct udma_chan_config backup_config;

	/* dmapool for packet mode descriptors */
	bool use_dma_pool;

@@ -2964,6 +2966,7 @@ udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl,
	struct scatterlist *sgent;
	struct cppi5_tr_type15_t *tr_req = NULL;
	enum dma_slave_buswidth dev_width;
	u32 csf = CPPI5_TR_CSF_SUPR_EVT;
	u16 tr_cnt0, tr_cnt1;
	dma_addr_t dev_addr;
	struct udma_desc *d;

@@ -3034,6 +3037,7 @@ udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl,

	if (uc->ud->match_data->type == DMA_TYPE_UDMA) {
		asel = 0;
		csf |= CPPI5_TR_CSF_EOL_ICNT0;
	} else {
		asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
		dev_addr |= asel;

@@ -3057,7 +3061,7 @@ udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl,

		cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15, false,
			      true, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
		cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
		cppi5_tr_csf_set(&tr_req[tr_idx].flags, csf);
		cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
				     uc->config.tr_trigger_type,
				     CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, 0, 0);

@@ -3103,8 +3107,7 @@ udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl,
			cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15,
				      false, true,
				      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
			cppi5_tr_csf_set(&tr_req[tr_idx].flags,
					 CPPI5_TR_CSF_SUPR_EVT);
			cppi5_tr_csf_set(&tr_req[tr_idx].flags, csf);
			cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
					     uc->config.tr_trigger_type,
					     CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC,

@@ -3148,8 +3151,7 @@ udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl,
		d->residue += sg_len;
	}

	cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
			 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
	cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, csf | CPPI5_TR_CSF_EOP);

	return d;
}

@@ -3678,6 +3680,7 @@ udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	int num_tr;
	size_t tr_size = sizeof(struct cppi5_tr_type15_t);
	u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
	u32 csf = CPPI5_TR_CSF_SUPR_EVT;

	if (uc->config.dir != DMA_MEM_TO_MEM) {
		dev_err(chan->device->dev,

@@ -3708,13 +3711,15 @@ udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	if (uc->ud->match_data->type != DMA_TYPE_UDMA) {
		src |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
		dest |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
	} else {
		csf |= CPPI5_TR_CSF_EOL_ICNT0;
	}

	tr_req = d->hwdesc[0].tr_req_base;

	cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
		      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
	cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
	cppi5_tr_csf_set(&tr_req[0].flags, csf);

	tr_req[0].addr = src;
	tr_req[0].icnt0 = tr0_cnt0;

@@ -3733,7 +3738,7 @@ udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	if (num_tr == 2) {
		cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
		cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
		cppi5_tr_csf_set(&tr_req[1].flags, csf);

		tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
		tr_req[1].icnt0 = tr1_cnt0;

@@ -3748,8 +3753,7 @@ udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		tr_req[1].dicnt3 = 1;
	}

	cppi5_tr_csf_set(&tr_req[num_tr - 1].flags,
			 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
	cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, csf | CPPI5_TR_CSF_EOP);

	if (uc->config.metadata_size)
		d->vd.tx.metadata_ops = &metadata_ops;

@@ -4412,6 +4416,7 @@ static const struct soc_device_attribute k3_soc_devices[] = {
	{ .family = "J721S2", .data = &j721e_soc_data},
	{ .family = "AM62X", .data = &am64_soc_data },
	{ .family = "AM62AX", .data = &am64_soc_data },
	{ .family = "J784S4", .data = &j721e_soc_data },
	{ /* sentinel */ }
};

@@ -5522,11 +5527,63 @@ static int udma_probe(struct platform_device *pdev)
	return ret;
}

static int udma_pm_suspend(struct device *dev)
{
	struct udma_dev *ud = dev_get_drvdata(dev);
	struct dma_device *dma_dev = &ud->ddev;
	struct dma_chan *chan;
	struct udma_chan *uc;

	list_for_each_entry(chan, &dma_dev->channels, device_node) {
		if (chan->client_count) {
			uc = to_udma_chan(chan);
			/* backup the channel configuration */
			memcpy(&uc->backup_config, &uc->config,
			       sizeof(struct udma_chan_config));
			dev_dbg(dev, "Suspending channel %s\n",
				dma_chan_name(chan));
			ud->ddev.device_free_chan_resources(chan);
		}
	}

	return 0;
}

static int udma_pm_resume(struct device *dev)
{
	struct udma_dev *ud = dev_get_drvdata(dev);
	struct dma_device *dma_dev = &ud->ddev;
	struct dma_chan *chan;
	struct udma_chan *uc;
	int ret;

	list_for_each_entry(chan, &dma_dev->channels, device_node) {
		if (chan->client_count) {
			uc = to_udma_chan(chan);
			/* restore the channel configuration */
			memcpy(&uc->config, &uc->backup_config,
			       sizeof(struct udma_chan_config));
			dev_dbg(dev, "Resuming channel %s\n",
				dma_chan_name(chan));
			ret = ud->ddev.device_alloc_chan_resources(chan);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static const struct dev_pm_ops udma_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(udma_pm_suspend, udma_pm_resume)
};

static struct platform_driver udma_driver = {
	.driver = {
		.name	= "ti-udma",
		.of_match_table = udma_of_match,
		.suppress_bind_attrs = true,
		.pm = &udma_pm_ops,
	},
	.probe		= udma_probe,
};
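
The udma suspend/resume hooks take an unusual route: instead of saving registers, they free each in-use channel's resources at late suspend and re-allocate them at resume, with the channel's software configuration stashed in backup_config so the re-allocation reproduces the old setup. The control flow, reduced to a framework-free sketch (the channel struct and callbacks are stand-ins):

#include <stdio.h>

struct chan_cfg { int remote_thread_id; };

struct chan {
	int in_use;
	struct chan_cfg cfg, backup;
};

/* Stand-ins for device_free_chan_resources()/device_alloc_chan_resources(). */
static void chan_free(struct chan *c)  { printf("free ch %#x\n", c->cfg.remote_thread_id); }
static int  chan_alloc(struct chan *c) { printf("alloc ch %#x\n", c->cfg.remote_thread_id); return 0; }

static void pm_suspend(struct chan *ch, int n)
{
	for (int i = 0; i < n; i++)
		if (ch[i].in_use) {
			ch[i].backup = ch[i].cfg;	/* keep the client's config */
			chan_free(&ch[i]);
		}
}

static int pm_resume(struct chan *ch, int n)
{
	for (int i = 0; i < n; i++)
		if (ch[i].in_use) {
			ch[i].cfg = ch[i].backup;	/* restore before realloc */
			if (chan_alloc(&ch[i]))
				return -1;
		}
	return 0;
}

int main(void)
{
	struct chan ch[2] = { { 1, { 0x4640 }, { 0 } }, { 0, { 0 }, { 0 } } };

	pm_suspend(ch, 2);
	return pm_resume(ch, 2);
}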
@@ -1060,7 +1060,11 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
	zdev->dev = &pdev->dev;
	INIT_LIST_HEAD(&zdev->common.channels);

	dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
	if (ret) {
		dev_err(&pdev->dev, "DMA not available for address range\n");
		return ret;
	}
	dma_cap_set(DMA_MEMCPY, zdev->common.cap_mask);

	p = &zdev->common;
@@ -616,6 +616,7 @@ static inline void *cppi5_hdesc_get_swdata(struct cppi5_host_desc_t *desc)
#define CPPI5_TR_CSF_SUPR_EVT		BIT(2)
#define CPPI5_TR_CSF_EOL_ADV_SHIFT	(4U)
#define CPPI5_TR_CSF_EOL_ADV_MASK	GENMASK(6, 4)
#define CPPI5_TR_CSF_EOL_ICNT0		BIT(4)
#define CPPI5_TR_CSF_EOP		BIT(7)

/**
@@ -30,6 +30,7 @@ enum idxd_scmd_stat {
	IDXD_SCMD_WQ_NO_PRIV = 0x800f0000,
	IDXD_SCMD_WQ_IRQ_ERR = 0x80100000,
	IDXD_SCMD_WQ_USER_NO_IOMMU = 0x80110000,
	IDXD_SCMD_DEV_EVL_ERR = 0x80120000,
};

#define IDXD_SCMD_SOFTERR_MASK	0x80000000

@@ -72,12 +73,14 @@ enum dsa_opcode {
	DSA_OPCODE_CR_DELTA,
	DSA_OPCODE_AP_DELTA,
	DSA_OPCODE_DUALCAST,
	DSA_OPCODE_TRANSL_FETCH,
	DSA_OPCODE_CRCGEN = 0x10,
	DSA_OPCODE_COPY_CRC,
	DSA_OPCODE_DIF_CHECK,
	DSA_OPCODE_DIF_INS,
	DSA_OPCODE_DIF_STRP,
	DSA_OPCODE_DIF_UPDT,
	DSA_OPCODE_DIX_GEN = 0x17,
	DSA_OPCODE_CFLUSH = 0x20,
};

@@ -132,6 +135,8 @@ enum dsa_completion_status {
	DSA_COMP_HW_ERR1,
	DSA_COMP_HW_ERR_DRB,
	DSA_COMP_TRANSLATION_FAIL,
	DSA_COMP_DRAIN_EVL = 0x26,
	DSA_COMP_BATCH_EVL_ERR,
};

enum iax_completion_status {

@@ -167,6 +172,7 @@ enum iax_completion_status {

#define DSA_COMP_STATUS_MASK		0x7f
#define DSA_COMP_STATUS_WRITE		0x80
#define DSA_COMP_STATUS(status)		((status) & DSA_COMP_STATUS_MASK)

struct dsa_hw_desc {
	uint32_t pasid:20;

@@ -180,6 +186,8 @@ struct dsa_hw_desc {
		uint64_t	rdback_addr;
		uint64_t	pattern;
		uint64_t	desc_list_addr;
		uint64_t	pattern_lower;
		uint64_t	transl_fetch_addr;
	};
	union {
		uint64_t	dst_addr;

@@ -190,6 +198,7 @@ struct dsa_hw_desc {
	union {
		uint32_t	xfer_size;
		uint32_t	desc_count;
		uint32_t	region_size;
	};
	uint16_t	int_handle;
	uint16_t	rsvd1;

@@ -244,6 +253,26 @@ struct dsa_hw_desc {
			uint16_t	dest_app_tag_seed;
		};

		/* Fill */
		uint64_t	pattern_upper;

		/* Translation fetch */
		struct {
			uint64_t	transl_fetch_res;
			uint32_t	region_stride;
		};

		/* DIX generate */
		struct {
			uint8_t		dix_gen_res;
			uint8_t		dest_dif_flags;
			uint8_t		dif_flags;
			uint8_t		dix_gen_res2[13];
			uint32_t	ref_tag_seed;
			uint16_t	app_tag_mask;
			uint16_t	app_tag_seed;
		};

		uint8_t		op_specific[24];
	};
} __attribute__((packed));

@@ -284,8 +313,12 @@ struct dsa_completion_record {
			uint8_t		result;
			uint8_t		dif_status;
		};
		uint16_t		rsvd;
	uint32_t		bytes_completed;
		uint8_t		fault_info;
		uint8_t		rsvd;
	union {
		uint32_t		bytes_completed;
		uint32_t		descs_completed;
	};
	uint64_t		fault_addr;
	union {
		/* common record */

@@ -322,6 +355,14 @@ struct dsa_completion_record {
			uint16_t	dif_upd_dest_app_tag;
		};

		/* DIX generate */
		struct {
			uint64_t	dix_gen_res;
			uint32_t	dix_ref_tag;
			uint16_t	dix_app_tag_mask;
			uint16_t	dix_app_tag;
		};

		uint8_t		op_specific[16];
	};
} __attribute__((packed));

@@ -333,7 +374,8 @@ struct dsa_raw_completion_record {
struct iax_completion_record {
	volatile uint8_t        status;
	uint8_t                 error_code;
	uint16_t                rsvd;
	uint8_t                 fault_info;
	uint8_t                 rsvd;
	uint32_t                bytes_completed;
	uint64_t                fault_addr;
	uint32_t                invalid_flags;
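
One detail of the completion-status convention these UAPI macros encode: the low 7 bits carry the status code while bit 7 (DSA_COMP_STATUS_WRITE) is ORed in separately by the device, so comparisons must go through the mask. A small demonstration (the DSA_COMP_SUCCESS value is assumed for the demo):

#include <stdint.h>
#include <stdio.h>

#define DSA_COMP_STATUS_MASK	0x7f
#define DSA_COMP_STATUS_WRITE	0x80
#define DSA_COMP_STATUS(status)	((status) & DSA_COMP_STATUS_MASK)

#define DSA_COMP_SUCCESS	0x01	/* assumed value for the demo */

int main(void)
{
	uint8_t raw = DSA_COMP_STATUS_WRITE | DSA_COMP_SUCCESS;

	/* naive compare fails because of the write bit... */
	printf("raw == SUCCESS: %d\n", raw == DSA_COMP_SUCCESS);
	/* ...masked compare is the correct idiom */
	printf("DSA_COMP_STATUS(raw) == SUCCESS: %d\n",
	       DSA_COMP_STATUS(raw) == DSA_COMP_SUCCESS);
	return 0;
}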