Char / Misc driver updates for 5.11-rc1
Here is the big char/misc driver update for 5.11-rc1.

Continuing the tradition of previous -rc1 pulls, there seems to be more
and more tiny driver subsystems flowing through this tree. Lots of
different things, all of which have been in linux-next for a while with
no reported issues:

 - extcon driver updates
 - habanalabs driver updates
 - mei driver updates
 - uio driver updates
 - binder fixes and features added
 - soundwire driver updates
 - mhi bus driver updates
 - phy driver updates
 - coresight driver updates
 - fpga driver updates
 - speakup driver updates
 - slimbus driver updates
 - various small char and misc driver updates

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

-----BEGIN PGP SIGNATURE-----

iG0EABECAC0WIQT0tgzFv3jCIUoxPcsxR9QN2y37KQUCX9iDZA8cZ3JlZ0Brcm9h
aC5jb20ACgkQMUfUDdst+ylRMACgqxKS2CUcY8tPnR5weHEsbz6O+KAAn3BtEFnK
7V9EnSuZe4L1jNOHOB5V
=xzHh
-----END PGP SIGNATURE-----

Merge tag 'char-misc-5.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc

Pull char / misc driver updates from Greg KH:
 "Here is the big char/misc driver update for 5.11-rc1.

  Continuing the tradition of previous -rc1 pulls, there seems to be
  more and more tiny driver subsystems flowing through this tree. Lots
  of different things, all of which have been in linux-next for a while
  with no reported issues:

   - extcon driver updates
   - habanalabs driver updates
   - mei driver updates
   - uio driver updates
   - binder fixes and features added
   - soundwire driver updates
   - mhi bus driver updates
   - phy driver updates
   - coresight driver updates
   - fpga driver updates
   - speakup driver updates
   - slimbus driver updates
   - various small char and misc driver updates"

* tag 'char-misc-5.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (305 commits)
  extcon: max77693: Fix modalias string
  extcon: fsa9480: Support TI TSU6111 variant
  extcon: fsa9480: Rewrite bindings in YAML and extend
  dt-bindings: extcon: add binding for TUSB320
  extcon: Add driver for TI TUSB320
  slimbus: qcom: fix potential NULL dereference in qcom_slim_prg_slew()
  siox: Make remove callback return void
  siox: Use bus_type functions for probe, remove and shutdown
  spmi: Add driver shutdown support
  spmi: fix some coding style issues at the spmi core
  spmi: get rid of a warning when built with W=1
  uio: uio_hv_generic: use devm_kzalloc() for private data alloc
  uio: uio_fsl_elbc_gpcm: use device-managed allocators
  uio: uio_aec: use devm_kzalloc() for uio_info object
  uio: uio_cif: use devm_kzalloc() for uio_info object
  uio: uio_netx: use devm_kzalloc() for or uio_info object
  uio: uio_mf624: use devm_kzalloc() for uio_info object
  uio: uio_sercos3: use device-managed functions for simple allocs
  uio: uio_dmem_genirq: finalize conversion of probe to devm_ handlers
  uio: uio_dmem_genirq: convert simple allocations to device-managed
  ...
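Several of the uio commits listed above convert one-off probe() allocations to device-managed memory. As a rough, hypothetical sketch of that pattern (not taken from any of the patches in this pull; the "foo" driver and its probe routine are placeholders), a devm_kzalloc() conversion usually ends up looking like this:

/*
 * Hypothetical sketch of the devm_kzalloc() conversion pattern named in
 * the uio commits above; "foo" is a placeholder driver, not code from
 * this pull.
 */
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/uio_driver.h>

static int foo_probe(struct platform_device *pdev)
{
	struct uio_info *info;

	/*
	 * Managed allocation: freed automatically when the device is
	 * unbound, so error paths and .remove() need no kfree().
	 */
	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	/* ... fill in info->name, info->version, mappings, IRQ, ... */

	platform_set_drvdata(pdev, info);
	return uio_register_device(&pdev->dev, info);
}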
commit 2911ed9f47
@@ -14,7 +14,7 @@ Users: any user space application which wants to communicate with
 		w1_term device
 
 
-What:		/sys/bus/w1/devices/.../eeprom
+What:		/sys/bus/w1/devices/.../eeprom_cmd
 Date:		May 2020
 Contact:	Akira Shimahara <akira215corp@gmail.com>
 Description:
@@ -344,6 +344,7 @@ spk key_slash = say_attributes
 spk key_8 = speakup_paste
 shift spk key_m = say_first_char
 ctrl spk key_semicolon = say_last_char
+spk key_r = read_all_doc
 
 5. The Speakup Sys System
 
@@ -92,7 +92,7 @@ required:
 
 patternProperties:
   "^usb-phy@[a-f0-9]+$":
-    allOf: [ $ref: "../usb/ingenic,jz4770-phy.yaml#" ]
+    allOf: [ $ref: "../phy/ingenic,phy-usb.yaml#" ]
 
 additionalProperties: false
 
@@ -1,21 +0,0 @@
-FAIRCHILD SEMICONDUCTOR FSA9480 MICROUSB SWITCH
-
-The FSA9480 is a USB port accessory detector and switch. The FSA9480 is fully
-controlled using I2C and enables USB data, stereo and mono audio, video,
-microphone, and UART data to use a common connector port.
-
-Required properties:
-- compatible : Must be one of
-  "fcs,fsa9480"
-  "fcs,fsa880"
-- reg : Specifies i2c slave address. Must be 0x25.
-- interrupts : Should contain one entry specifying interrupt signal of
-  interrupt parent to which interrupt pin of the chip is connected.
-
-Example:
-	musb@25 {
-		compatible = "fcs,fsa9480";
-		reg = <0x25>;
-		interrupt-parent = <&gph2>;
-		interrupts = <7 0>;
-	};
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/extcon/extcon-usbc-tusb320.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI TUSB320 USB Type-C CC Logic controller
+
+maintainers:
+  - Michael Auchter <michael.auchter@ni.com>
+
+properties:
+  compatible:
+    const: ti,tusb320
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - interrupts
+
+additionalProperties: false
+
+examples:
+  - |
+    i2c0 {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        tusb320@61 {
+            compatible = "ti,tusb320";
+            reg = <0x61>;
+            interrupt-parent = <&gpio>;
+            interrupts = <27 1>;
+        };
+    };
+...
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/extcon/fcs,fsa880.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Fairchild Semiconductor FSA880, FSA9480 and compatibles
+
+maintainers:
+  - Linus Walleij <linus.walleij@linaro.org>
+
+description:
+  The FSA880 and FSA9480 are USB port accessory detectors and switches.
+  The switch is fully controlled using I2C and enables USB data, stereo
+  and mono audio, video, microphone, and UART data to use a common
+  connector port. Compatible switches exist from other manufacturers.
+
+properties:
+  compatible:
+    enum:
+      - fcs,fsa880
+      - fcs,fsa9480
+      - ti,tsu6111
+
+  reg:
+    maxItems: 1
+    description: The I2C address for an FSA880 compatible device is
+      usually 0x25.
+
+  interrupts:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - interrupts
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        usb-switch@25 {
+            compatible = "fcs,fsa880";
+            reg = <0x25>;
+            interrupt-parent = <&gpio>;
+            interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
+        };
+    };
@@ -7,6 +7,7 @@ Required properties:
 	      "mediatek,mt7622-efuse", "mediatek,efuse": for MT7622
 	      "mediatek,mt7623-efuse", "mediatek,efuse": for MT7623
 	      "mediatek,mt8173-efuse" or "mediatek,efuse": for MT8173
+	      "mediatek,mt8516-efuse", "mediatek,efuse": for MT8516
 - reg: Should contain registers location and length
 
 = Data cells =
@@ -14,7 +14,18 @@ allOf:
 
 properties:
   compatible:
-    const: qcom,qfprom
+    items:
+      - enum:
+          - qcom,apq8064-qfprom
+          - qcom,apq8084-qfprom
+          - qcom,msm8974-qfprom
+          - qcom,msm8916-qfprom
+          - qcom,msm8996-qfprom
+          - qcom,msm8998-qfprom
+          - qcom,qcs404-qfprom
+          - qcom,sc7180-qfprom
+          - qcom,sdm845-qfprom
+      - const: qcom,qfprom
 
   reg:
     # If the QFPROM is read-only OS image then only the corrected region
@@ -60,7 +71,7 @@ examples:
     #size-cells = <2>;
 
     efuse@784000 {
-        compatible = "qcom,qfprom";
+        compatible = "qcom,sc7180-qfprom", "qcom,qfprom";
         reg = <0 0x00784000 0 0x8ff>,
               <0 0x00780000 0 0x7a0>,
               <0 0x00782000 0 0x100>,
@@ -85,7 +96,7 @@ examples:
     #size-cells = <2>;
 
     efuse@784000 {
-        compatible = "qcom,qfprom";
+        compatible = "qcom,sdm845-qfprom", "qcom,qfprom";
        reg = <0 0x00784000 0 0x8ff>;
        #address-cells = <1>;
        #size-cells = <1>;
@ -0,0 +1,70 @@
|
|||
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
|
||||
# Copyright 2020 BayLibre, SAS
|
||||
%YAML 1.2
|
||||
---
|
||||
$id: "http://devicetree.org/schemas/phy/amlogic,axg-mipi-dphy.yaml#"
|
||||
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
|
||||
|
||||
title: Amlogic AXG MIPI D-PHY
|
||||
|
||||
maintainers:
|
||||
- Neil Armstrong <narmstrong@baylibre.com>
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
enum:
|
||||
- amlogic,axg-mipi-dphy
|
||||
|
||||
reg:
|
||||
maxItems: 1
|
||||
|
||||
clocks:
|
||||
maxItems: 1
|
||||
|
||||
clock-names:
|
||||
items:
|
||||
- const: pclk
|
||||
|
||||
resets:
|
||||
maxItems: 1
|
||||
|
||||
reset-names:
|
||||
items:
|
||||
- const: phy
|
||||
|
||||
"#phy-cells":
|
||||
const: 0
|
||||
|
||||
phys:
|
||||
maxItems: 1
|
||||
|
||||
phy-names:
|
||||
items:
|
||||
- const: analog
|
||||
|
||||
required:
|
||||
- compatible
|
||||
- reg
|
||||
- clocks
|
||||
- clock-names
|
||||
- resets
|
||||
- reset-names
|
||||
- phys
|
||||
- phy-names
|
||||
- "#phy-cells"
|
||||
|
||||
additionalProperties: false
|
||||
|
||||
examples:
|
||||
- |
|
||||
phy@ff640000 {
|
||||
compatible = "amlogic,axg-mipi-dphy";
|
||||
reg = <0xff640000 0x100>;
|
||||
clocks = <&clk_mipi_dsi_phy>;
|
||||
clock-names = "pclk";
|
||||
resets = <&reset_phy>;
|
||||
reset-names = "phy";
|
||||
phys = <&mipi_pcie_analog_dphy>;
|
||||
phy-names = "analog";
|
||||
#phy-cells = <0>;
|
||||
};
|
|
@@ -9,27 +9,32 @@ title: Amlogic AXG shared MIPI/PCIE analog PHY
 maintainers:
   - Remi Pommarel <repk@triplefau.lt>
 
+description: |+
+  The Everything-Else Power Domains node should be the child of a syscon
+  node with the required property:
+
+  - compatible: Should be the following:
+                "amlogic,meson-gx-hhi-sysctrl", "simple-mfd", "syscon"
+
+  Refer to the the bindings described in
+  Documentation/devicetree/bindings/mfd/syscon.yaml
+
 properties:
   compatible:
     const: amlogic,axg-mipi-pcie-analog-phy
 
-  reg:
-    maxItems: 1
-
   "#phy-cells":
-    const: 1
+    const: 0
 
 required:
   - compatible
-  - reg
  - "#phy-cells"
 
 additionalProperties: false
 
 examples:
   - |
-    mpphy: phy@0 {
+    mpphy: phy {
       compatible = "amlogic,axg-mipi-pcie-analog-phy";
-      reg = <0x0 0xc>;
-      #phy-cells = <1>;
+      #phy-cells = <0>;
     };
@ -0,0 +1,148 @@
|
|||
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
|
||||
%YAML 1.2
|
||||
---
|
||||
$id: "http://devicetree.org/schemas/phy/brcm,sata-phy.yaml#"
|
||||
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
|
||||
|
||||
title: Broadcom SATA3 PHY
|
||||
|
||||
maintainers:
|
||||
- Florian Fainelli <f.fainelli@gmail.com>
|
||||
|
||||
properties:
|
||||
$nodename:
|
||||
pattern: "^sata[-|_]phy(@.*)?$"
|
||||
|
||||
compatible:
|
||||
oneOf:
|
||||
- items:
|
||||
- enum:
|
||||
- brcm,bcm7216-sata-phy
|
||||
- brcm,bcm7425-sata-phy
|
||||
- brcm,bcm7445-sata-phy
|
||||
- brcm,bcm63138-sata-phy
|
||||
- const: brcm,phy-sata3
|
||||
- items:
|
||||
- const: brcm,iproc-nsp-sata-phy
|
||||
- items:
|
||||
- const: brcm,iproc-ns2-sata-phy
|
||||
- items:
|
||||
- const: brcm,iproc-sr-sata-phy
|
||||
|
||||
reg:
|
||||
minItems: 1
|
||||
maxItems: 2
|
||||
|
||||
reg-names:
|
||||
minItems: 1
|
||||
maxItems: 2
|
||||
items:
|
||||
- const: phy
|
||||
- const: phy-ctrl
|
||||
|
||||
"#address-cells":
|
||||
const: 1
|
||||
|
||||
"#size-cells":
|
||||
const: 0
|
||||
|
||||
patternProperties:
|
||||
"^sata-phy@[0-9]+$":
|
||||
type: object
|
||||
description: |
|
||||
Each port's PHY should be represented as a sub-node.
|
||||
|
||||
properties:
|
||||
reg:
|
||||
description: The SATA PHY port number
|
||||
maxItems: 1
|
||||
|
||||
"#phy-cells":
|
||||
const: 0
|
||||
|
||||
"brcm,enable-ssc":
|
||||
$ref: /schemas/types.yaml#/definitions/flag
|
||||
description: |
|
||||
Use spread spectrum clocking (SSC) on this port
|
||||
This property is not applicable for "brcm,iproc-ns2-sata-phy",
|
||||
"brcm,iproc-nsp-sata-phy" and "brcm,iproc-sr-sata-phy".
|
||||
|
||||
"brcm,rxaeq-mode":
|
||||
$ref: /schemas/types.yaml#/definitions/string
|
||||
description:
|
||||
String that indicates the desired RX equalizer mode.
|
||||
enum:
|
||||
- off
|
||||
- auto
|
||||
- manual
|
||||
|
||||
"brcm,rxaeq-value":
|
||||
$ref: /schemas/types.yaml#/definitions/uint32
|
||||
description: |
|
||||
When 'brcm,rxaeq-mode' is set to "manual", provides the RX
|
||||
equalizer value that should be used.
|
||||
minimum: 0
|
||||
maximum: 63
|
||||
|
||||
"brcm,tx-amplitude-millivolt":
|
||||
description: |
|
||||
Transmit amplitude voltage in millivolt.
|
||||
$ref: /schemas/types.yaml#/definitions/uint32
|
||||
enum: [400, 500, 600, 800]
|
||||
|
||||
required:
|
||||
- reg
|
||||
- "#phy-cells"
|
||||
|
||||
additionalProperties: false
|
||||
|
||||
if:
|
||||
properties:
|
||||
compatible:
|
||||
items:
|
||||
const: brcm,iproc-ns2-sata-phy
|
||||
then:
|
||||
properties:
|
||||
reg:
|
||||
maxItems: 2
|
||||
reg-names:
|
||||
items:
|
||||
- const: "phy"
|
||||
- const: "phy-ctrl"
|
||||
else:
|
||||
properties:
|
||||
reg:
|
||||
maxItems: 1
|
||||
reg-names:
|
||||
maxItems: 1
|
||||
items:
|
||||
- const: "phy"
|
||||
|
||||
required:
|
||||
- compatible
|
||||
- "#address-cells"
|
||||
- "#size-cells"
|
||||
- reg
|
||||
- reg-names
|
||||
|
||||
additionalProperties: false
|
||||
|
||||
examples:
|
||||
- |
|
||||
sata_phy@f0458100 {
|
||||
compatible = "brcm,bcm7445-sata-phy", "brcm,phy-sata3";
|
||||
reg = <0xf0458100 0x1e00>;
|
||||
reg-names = "phy";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
sata-phy@0 {
|
||||
reg = <0>;
|
||||
#phy-cells = <0>;
|
||||
};
|
||||
|
||||
sata-phy@1 {
|
||||
reg = <1>;
|
||||
#phy-cells = <0>;
|
||||
};
|
||||
};
|
|
@ -1,58 +0,0 @@
|
|||
* Broadcom SATA3 PHY
|
||||
|
||||
Required properties:
|
||||
- compatible: should be one or more of
|
||||
"brcm,bcm7216-sata-phy"
|
||||
"brcm,bcm7425-sata-phy"
|
||||
"brcm,bcm7445-sata-phy"
|
||||
"brcm,iproc-ns2-sata-phy"
|
||||
"brcm,iproc-nsp-sata-phy"
|
||||
"brcm,phy-sata3"
|
||||
"brcm,iproc-sr-sata-phy"
|
||||
"brcm,bcm63138-sata-phy"
|
||||
- address-cells: should be 1
|
||||
- size-cells: should be 0
|
||||
- reg: register ranges for the PHY PCB interface
|
||||
- reg-names: should be "phy" and "phy-ctrl"
|
||||
The "phy-ctrl" registers are only required for
|
||||
"brcm,iproc-ns2-sata-phy" and "brcm,iproc-sr-sata-phy".
|
||||
|
||||
Sub-nodes:
|
||||
Each port's PHY should be represented as a sub-node.
|
||||
|
||||
Sub-nodes required properties:
|
||||
- reg: the PHY number
|
||||
- phy-cells: generic PHY binding; must be 0
|
||||
|
||||
Sub-nodes optional properties:
|
||||
- brcm,enable-ssc: use spread spectrum clocking (SSC) on this port
|
||||
This property is not applicable for "brcm,iproc-ns2-sata-phy",
|
||||
"brcm,iproc-nsp-sata-phy" and "brcm,iproc-sr-sata-phy".
|
||||
|
||||
- brcm,rxaeq-mode: string that indicates the desired RX equalizer
|
||||
mode, possible values are:
|
||||
"off" (equivalent to not specifying the property)
|
||||
"auto"
|
||||
"manual" (brcm,rxaeq-value is used in that case)
|
||||
|
||||
- brcm,rxaeq-value: when 'rxaeq-mode' is set to "manual", provides the RX
|
||||
equalizer value that should be used. Allowed range is 0..63.
|
||||
|
||||
Example
|
||||
sata-phy@f0458100 {
|
||||
compatible = "brcm,bcm7445-sata-phy", "brcm,phy-sata3";
|
||||
reg = <0xf0458100 0x1e00>, <0xf045804c 0x10>;
|
||||
reg-names = "phy";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
sata-phy@0 {
|
||||
reg = <0>;
|
||||
#phy-cells = <0>;
|
||||
};
|
||||
|
||||
sata-phy@1 {
|
||||
reg = <1>;
|
||||
#phy-cells = <0>;
|
||||
};
|
||||
};
|
|
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/usb/ingenic,jz4770-phy.yaml#
+$id: http://devicetree.org/schemas/phy/ingenic,phy-usb.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: Ingenic SoCs USB PHY devicetree bindings
@@ -17,9 +17,11 @@ properties:
   compatible:
     enum:
       - ingenic,jz4770-phy
+      - ingenic,jz4775-phy
       - ingenic,jz4780-phy
       - ingenic,x1000-phy
       - ingenic,x1830-phy
+      - ingenic,x2000-phy
 
   reg:
     maxItems: 1
@ -0,0 +1,44 @@
|
|||
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
|
||||
%YAML 1.2
|
||||
---
|
||||
$id: http://devicetree.org/schemas/phy/intel,phy-keembay-usb.yaml#
|
||||
$schema: http://devicetree.org/meta-schemas/core.yaml#
|
||||
|
||||
title: Intel Keem Bay USB PHY bindings
|
||||
|
||||
maintainers:
|
||||
- Wan Ahmad Zainie <wan.ahmad.zainie.wan.mohamad@intel.com>
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
const: intel,keembay-usb-phy
|
||||
|
||||
reg:
|
||||
items:
|
||||
- description: USB APB CPR (clock, power, reset) register
|
||||
- description: USB APB slave register
|
||||
|
||||
reg-names:
|
||||
items:
|
||||
- const: cpr-apb-base
|
||||
- const: slv-apb-base
|
||||
|
||||
'#phy-cells':
|
||||
const: 0
|
||||
|
||||
required:
|
||||
- compatible
|
||||
- reg
|
||||
- '#phy-cells'
|
||||
|
||||
additionalProperties: false
|
||||
|
||||
examples:
|
||||
- |
|
||||
usb-phy@20400000 {
|
||||
compatible = "intel,keembay-usb-phy";
|
||||
reg = <0x20400000 0x1c>,
|
||||
<0x20480000 0xd0>;
|
||||
reg-names = "cpr-apb-base", "slv-apb-base";
|
||||
#phy-cells = <0>;
|
||||
};
|
|
@ -1,4 +1,4 @@
|
|||
# SPDX-License-Identifier: GPL-2.0-or-later
|
||||
# SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause)
|
||||
# Copyright 2019 Lubomir Rintel <lkundrak@v3.sk>
|
||||
%YAML 1.2
|
||||
---
|
||||
|
@ -18,27 +18,20 @@ properties:
|
|||
maxItems: 1
|
||||
description: base address of the device
|
||||
|
||||
reset-gpios:
|
||||
maxItems: 1
|
||||
description: GPIO connected to reset
|
||||
|
||||
"#phy-cells":
|
||||
const: 0
|
||||
|
||||
required:
|
||||
- compatible
|
||||
- reg
|
||||
- reset-gpios
|
||||
- "#phy-cells"
|
||||
|
||||
additionalProperties: false
|
||||
|
||||
examples:
|
||||
- |
|
||||
#include <dt-bindings/gpio/gpio.h>
|
||||
hsic-phy@f0001800 {
|
||||
compatible = "marvell,mmp3-hsic-phy";
|
||||
reg = <0xf0001800 0x40>;
|
||||
reset-gpios = <&gpio 63 GPIO_ACTIVE_HIGH>;
|
||||
#phy-cells = <0>;
|
||||
};
|
||||
|
|
|
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: (GPL-2.0-only or BSD-2-Clause)
 %YAML 1.2
 ---
 $id: "http://devicetree.org/schemas/phy/mediatek,mt7621-pci-phy.yaml#"
@ -1,70 +0,0 @@
|
|||
Cadence Sierra PHY
|
||||
-----------------------
|
||||
|
||||
Required properties:
|
||||
- compatible: Must be "cdns,sierra-phy-t0" for Sierra in Cadence platform
|
||||
Must be "ti,sierra-phy-t0" for Sierra in TI's J721E SoC.
|
||||
- resets: Must contain an entry for each in reset-names.
|
||||
See ../reset/reset.txt for details.
|
||||
- reset-names: Must include "sierra_reset" and "sierra_apb".
|
||||
"sierra_reset" must control the reset line to the PHY.
|
||||
"sierra_apb" must control the reset line to the APB PHY
|
||||
interface ("sierra_apb" is optional).
|
||||
- reg: register range for the PHY.
|
||||
- #address-cells: Must be 1
|
||||
- #size-cells: Must be 0
|
||||
|
||||
Optional properties:
|
||||
- clocks: Must contain an entry in clock-names.
|
||||
See ../clocks/clock-bindings.txt for details.
|
||||
- clock-names: Must contain "cmn_refclk_dig_div" and
|
||||
"cmn_refclk1_dig_div" for configuring the frequency of
|
||||
the clock to the lanes. "phy_clk" is deprecated.
|
||||
- cdns,autoconf: A boolean property whose presence indicates that the
|
||||
PHY registers will be configured by hardware. If not
|
||||
present, all sub-node optional properties must be
|
||||
provided.
|
||||
|
||||
Sub-nodes:
|
||||
Each group of PHY lanes with a single master lane should be represented as
|
||||
a sub-node. Note that the actual configuration of each lane is determined by
|
||||
hardware strapping, and must match the configuration specified here.
|
||||
|
||||
Sub-node required properties:
|
||||
- #phy-cells: Generic PHY binding; must be 0.
|
||||
- reg: The master lane number. This is the lowest numbered lane
|
||||
in the lane group.
|
||||
- resets: Must contain one entry which controls the reset line for the
|
||||
master lane of the sub-node.
|
||||
See ../reset/reset.txt for details.
|
||||
|
||||
Sub-node optional properties:
|
||||
- cdns,num-lanes: Number of lanes in this group. From 1 to 4. The
|
||||
group is made up of consecutive lanes.
|
||||
- cdns,phy-type: Can be PHY_TYPE_PCIE or PHY_TYPE_USB3, depending on
|
||||
configuration of lanes.
|
||||
|
||||
Example:
|
||||
pcie_phy4: pcie-phy@fd240000 {
|
||||
compatible = "cdns,sierra-phy-t0";
|
||||
reg = <0x0 0xfd240000 0x0 0x40000>;
|
||||
resets = <&phyrst 0>, <&phyrst 1>;
|
||||
reset-names = "sierra_reset", "sierra_apb";
|
||||
clocks = <&phyclock>;
|
||||
clock-names = "phy_clk";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
pcie0_phy0: pcie-phy@0 {
|
||||
reg = <0>;
|
||||
resets = <&phyrst 2>;
|
||||
cdns,num-lanes = <2>;
|
||||
#phy-cells = <0>;
|
||||
cdns,phy-type = <PHY_TYPE_PCIE>;
|
||||
};
|
||||
pcie0_phy1: pcie-phy@2 {
|
||||
reg = <2>;
|
||||
resets = <&phyrst 4>;
|
||||
cdns,num-lanes = <1>;
|
||||
#phy-cells = <0>;
|
||||
cdns,phy-type = <PHY_TYPE_PCIE>;
|
||||
};
|
|
@ -0,0 +1,152 @@
|
|||
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
|
||||
%YAML 1.2
|
||||
---
|
||||
$id: "http://devicetree.org/schemas/phy/phy-cadence-sierra.yaml#"
|
||||
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
|
||||
|
||||
title: Cadence Sierra PHY binding
|
||||
|
||||
description:
|
||||
This binding describes the Cadence Sierra PHY. Sierra PHY supports multilink
|
||||
multiprotocol combinations including protocols such as PCIe, USB etc.
|
||||
|
||||
maintainers:
|
||||
- Swapnil Jakhade <sjakhade@cadence.com>
|
||||
- Yuti Amonkar <yamonkar@cadence.com>
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
enum:
|
||||
- cdns,sierra-phy-t0
|
||||
- ti,sierra-phy-t0
|
||||
|
||||
'#address-cells':
|
||||
const: 1
|
||||
|
||||
'#size-cells':
|
||||
const: 0
|
||||
|
||||
resets:
|
||||
minItems: 1
|
||||
maxItems: 2
|
||||
items:
|
||||
- description: Sierra PHY reset.
|
||||
- description: Sierra APB reset. This is optional.
|
||||
|
||||
reset-names:
|
||||
minItems: 1
|
||||
maxItems: 2
|
||||
items:
|
||||
- const: sierra_reset
|
||||
- const: sierra_apb
|
||||
|
||||
reg:
|
||||
maxItems: 1
|
||||
description:
|
||||
Offset of the Sierra PHY configuration registers.
|
||||
|
||||
reg-names:
|
||||
const: serdes
|
||||
|
||||
clocks:
|
||||
maxItems: 2
|
||||
|
||||
clock-names:
|
||||
items:
|
||||
- const: cmn_refclk_dig_div
|
||||
- const: cmn_refclk1_dig_div
|
||||
|
||||
cdns,autoconf:
|
||||
type: boolean
|
||||
description:
|
||||
A boolean property whose presence indicates that the PHY registers will be
|
||||
configured by hardware. If not present, all sub-node optional properties
|
||||
must be provided.
|
||||
|
||||
patternProperties:
|
||||
'^phy@[0-9a-f]$':
|
||||
type: object
|
||||
description:
|
||||
Each group of PHY lanes with a single master lane should be represented as
|
||||
a sub-node. Note that the actual configuration of each lane is determined
|
||||
by hardware strapping, and must match the configuration specified here.
|
||||
properties:
|
||||
reg:
|
||||
description:
|
||||
The master lane number. This is the lowest numbered lane in the lane group.
|
||||
minimum: 0
|
||||
maximum: 15
|
||||
|
||||
resets:
|
||||
minItems: 1
|
||||
maxItems: 4
|
||||
description:
|
||||
Contains list of resets, one per lane, to get all the link lanes out of reset.
|
||||
|
||||
"#phy-cells":
|
||||
const: 0
|
||||
|
||||
cdns,phy-type:
|
||||
description:
|
||||
Specifies the type of PHY for which the group of PHY lanes is used.
|
||||
Refer include/dt-bindings/phy/phy.h. Constants from the header should be used.
|
||||
$ref: /schemas/types.yaml#/definitions/uint32
|
||||
enum: [2, 4]
|
||||
|
||||
cdns,num-lanes:
|
||||
description:
|
||||
Number of lanes in this group. The group is made up of consecutive lanes.
|
||||
$ref: /schemas/types.yaml#/definitions/uint32
|
||||
minimum: 1
|
||||
maximum: 16
|
||||
|
||||
required:
|
||||
- reg
|
||||
- resets
|
||||
- "#phy-cells"
|
||||
|
||||
additionalProperties: false
|
||||
|
||||
required:
|
||||
- compatible
|
||||
- "#address-cells"
|
||||
- "#size-cells"
|
||||
- reg
|
||||
- resets
|
||||
- reset-names
|
||||
|
||||
additionalProperties: false
|
||||
|
||||
examples:
|
||||
- |
|
||||
#include <dt-bindings/phy/phy.h>
|
||||
|
||||
bus {
|
||||
#address-cells = <2>;
|
||||
#size-cells = <2>;
|
||||
|
||||
sierra-phy@fd240000 {
|
||||
compatible = "cdns,sierra-phy-t0";
|
||||
reg = <0x0 0xfd240000 0x0 0x40000>;
|
||||
resets = <&phyrst 0>, <&phyrst 1>;
|
||||
reset-names = "sierra_reset", "sierra_apb";
|
||||
clocks = <&cmn_refclk_dig_div>, <&cmn_refclk1_dig_div>;
|
||||
clock-names = "cmn_refclk_dig_div", "cmn_refclk1_dig_div";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
pcie0_phy0: phy@0 {
|
||||
reg = <0>;
|
||||
resets = <&phyrst 2>;
|
||||
cdns,num-lanes = <2>;
|
||||
#phy-cells = <0>;
|
||||
cdns,phy-type = <PHY_TYPE_PCIE>;
|
||||
};
|
||||
pcie0_phy1: phy@2 {
|
||||
reg = <2>;
|
||||
resets = <&phyrst 4>;
|
||||
cdns,num-lanes = <1>;
|
||||
#phy-cells = <0>;
|
||||
cdns,phy-type = <PHY_TYPE_PCIE>;
|
||||
};
|
||||
};
|
||||
};
|
|
@ -1,73 +0,0 @@
|
|||
STMicroelectronics STM32 USB HS PHY controller
|
||||
|
||||
The STM32 USBPHYC block contains a dual port High Speed UTMI+ PHY and a UTMI
|
||||
switch. It controls PHY configuration and status, and the UTMI+ switch that
|
||||
selects either OTG or HOST controller for the second PHY port. It also sets
|
||||
PLL configuration.
|
||||
|
||||
USBPHYC
|
||||
|_ PLL
|
||||
|
|
||||
|_ PHY port#1 _________________ HOST controller
|
||||
| _ |
|
||||
| / 1|________________|
|
||||
|_ PHY port#2 ----| |________________
|
||||
| \_0| |
|
||||
|_ UTMI switch_______| OTG controller
|
||||
|
||||
|
||||
Phy provider node
|
||||
=================
|
||||
|
||||
Required properties:
|
||||
- compatible: must be "st,stm32mp1-usbphyc"
|
||||
- reg: address and length of the usb phy control register set
|
||||
- clocks: phandle + clock specifier for the PLL phy clock
|
||||
- #address-cells: number of address cells for phys sub-nodes, must be <1>
|
||||
- #size-cells: number of size cells for phys sub-nodes, must be <0>
|
||||
|
||||
Optional properties:
|
||||
- assigned-clocks: phandle + clock specifier for the PLL phy clock
|
||||
- assigned-clock-parents: the PLL phy clock parent
|
||||
- resets: phandle + reset specifier
|
||||
|
||||
Required nodes: one sub-node per port the controller provides.
|
||||
|
||||
Phy sub-nodes
|
||||
==============
|
||||
|
||||
Required properties:
|
||||
- reg: phy port index
|
||||
- phy-supply: phandle to the regulator providing 3V3 power to the PHY,
|
||||
see phy-bindings.txt in the same directory.
|
||||
- vdda1v1-supply: phandle to the regulator providing 1V1 power to the PHY
|
||||
- vdda1v8-supply: phandle to the regulator providing 1V8 power to the PHY
|
||||
- #phy-cells: see phy-bindings.txt in the same directory, must be <0> for PHY
|
||||
port#1 and must be <1> for PHY port#2, to select USB controller
|
||||
|
||||
|
||||
Example:
|
||||
usbphyc: usb-phy@5a006000 {
|
||||
compatible = "st,stm32mp1-usbphyc";
|
||||
reg = <0x5a006000 0x1000>;
|
||||
clocks = <&rcc_clk USBPHY_K>;
|
||||
resets = <&rcc_rst USBPHY_R>;
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
usbphyc_port0: usb-phy@0 {
|
||||
reg = <0>;
|
||||
phy-supply = <&vdd_usb>;
|
||||
vdda1v1-supply = <®11>;
|
||||
vdda1v8-supply = <®18>
|
||||
#phy-cells = <0>;
|
||||
};
|
||||
|
||||
usbphyc_port1: usb-phy@1 {
|
||||
reg = <1>;
|
||||
phy-supply = <&vdd_usb>;
|
||||
vdda1v1-supply = <®11>;
|
||||
vdda1v8-supply = <®18>
|
||||
#phy-cells = <1>;
|
||||
};
|
||||
};
|
|
@ -0,0 +1,138 @@
|
|||
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
|
||||
%YAML 1.2
|
||||
---
|
||||
$id: http://devicetree.org/schemas/phy/phy-stm32-usbphyc.yaml#
|
||||
$schema: http://devicetree.org/meta-schemas/core.yaml#
|
||||
|
||||
title: STMicroelectronics STM32 USB HS PHY controller binding
|
||||
|
||||
description:
|
||||
|
||||
The STM32 USBPHYC block contains a dual port High Speed UTMI+ PHY and a UTMI
|
||||
switch. It controls PHY configuration and status, and the UTMI+ switch that
|
||||
selects either OTG or HOST controller for the second PHY port. It also sets
|
||||
PLL configuration.
|
||||
|
||||
USBPHYC
|
||||
|_ PLL
|
||||
|
|
||||
|_ PHY port#1 _________________ HOST controller
|
||||
| __ |
|
||||
| / 1|________________|
|
||||
|_ PHY port#2 ----| |________________
|
||||
| \_0| |
|
||||
|_ UTMI switch_______| OTG controller
|
||||
|
||||
maintainers:
|
||||
- Amelie Delaunay <amelie.delaunay@st.com>
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
const: st,stm32mp1-usbphyc
|
||||
|
||||
reg:
|
||||
maxItems: 1
|
||||
|
||||
clocks:
|
||||
maxItems: 1
|
||||
|
||||
resets:
|
||||
maxItems: 1
|
||||
|
||||
"#address-cells":
|
||||
const: 1
|
||||
|
||||
"#size-cells":
|
||||
const: 0
|
||||
|
||||
#Required child nodes:
|
||||
|
||||
patternProperties:
|
||||
"^usb-phy@[0|1]$":
|
||||
type: object
|
||||
description:
|
||||
Each port the controller provides must be represented as a sub-node.
|
||||
|
||||
properties:
|
||||
reg:
|
||||
description: phy port index.
|
||||
maxItems: 1
|
||||
|
||||
phy-supply:
|
||||
description: regulator providing 3V3 power supply to the PHY.
|
||||
|
||||
vdda1v1-supply:
|
||||
description: regulator providing 1V1 power supply to the PLL block
|
||||
|
||||
vdda1v8-supply:
|
||||
description: regulator providing 1V8 power supply to the PLL block
|
||||
|
||||
"#phy-cells":
|
||||
enum: [ 0x0, 0x1 ]
|
||||
|
||||
allOf:
|
||||
- if:
|
||||
properties:
|
||||
reg:
|
||||
const: 0
|
||||
then:
|
||||
properties:
|
||||
"#phy-cells":
|
||||
const: 0
|
||||
else:
|
||||
properties:
|
||||
"#phy-cells":
|
||||
const: 1
|
||||
description:
|
||||
The value is used to select UTMI switch output.
|
||||
0 for OTG controller and 1 for Host controller.
|
||||
|
||||
required:
|
||||
- reg
|
||||
- phy-supply
|
||||
- vdda1v1-supply
|
||||
- vdda1v8-supply
|
||||
- "#phy-cells"
|
||||
|
||||
additionalProperties: false
|
||||
|
||||
required:
|
||||
- compatible
|
||||
- reg
|
||||
- clocks
|
||||
- "#address-cells"
|
||||
- "#size-cells"
|
||||
- usb-phy@0
|
||||
- usb-phy@1
|
||||
|
||||
additionalProperties: false
|
||||
|
||||
examples:
|
||||
- |
|
||||
#include <dt-bindings/clock/stm32mp1-clks.h>
|
||||
#include <dt-bindings/reset/stm32mp1-resets.h>
|
||||
usbphyc: usbphyc@5a006000 {
|
||||
compatible = "st,stm32mp1-usbphyc";
|
||||
reg = <0x5a006000 0x1000>;
|
||||
clocks = <&rcc USBPHY_K>;
|
||||
resets = <&rcc USBPHY_R>;
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
usbphyc_port0: usb-phy@0 {
|
||||
reg = <0>;
|
||||
phy-supply = <&vdd_usb>;
|
||||
vdda1v1-supply = <®11>;
|
||||
vdda1v8-supply = <®18>;
|
||||
#phy-cells = <0>;
|
||||
};
|
||||
|
||||
usbphyc_port1: usb-phy@1 {
|
||||
reg = <1>;
|
||||
phy-supply = <&vdd_usb>;
|
||||
vdda1v1-supply = <®11>;
|
||||
vdda1v8-supply = <®18>;
|
||||
#phy-cells = <1>;
|
||||
};
|
||||
};
|
||||
...
|
|
@@ -31,6 +31,9 @@ properties:
       - qcom,sdm845-qmp-usb3-uni-phy
       - qcom,sm8150-qmp-ufs-phy
       - qcom,sm8250-qmp-ufs-phy
+      - qcom,sm8250-qmp-gen3x1-pcie-phy
+      - qcom,sm8250-qmp-gen3x2-pcie-phy
+      - qcom,sm8250-qmp-modem-pcie-phy
 
   reg:
     items:
@@ -259,6 +262,9 @@ allOf:
           enum:
             - qcom,sdm845-qhp-pcie-phy
             - qcom,sdm845-qmp-pcie-phy
+            - qcom,sm8250-qmp-gen3x1-pcie-phy
+            - qcom,sm8250-qmp-gen3x2-pcie-phy
+            - qcom,sm8250-qmp-modem-pcie-phy
       then:
         properties:
           clocks:
@@ -16,6 +16,11 @@ Optional properties:
 - drive-impedance-ohm: Specifies the drive impedance in Ohm.
                        Possible values are 33, 40, 50, 66 and 100.
                        If not set, the default value of 50 will be applied.
+- enable-strobe-pulldown: Enable internal pull-down for the strobe line.
+                          If not set, pull-down is not used.
+- output-tapdelay-select: Specifies the phyctrl_otapdlysec register.
+                          If not set, the register defaults to 0x4.
+                          Maximum value 0xf.
 
 Example:
 
@@ -47,6 +47,7 @@ Required properties:
 	- "samsung,exynos4210-usb2-phy"
 	- "samsung,exynos4x12-usb2-phy"
 	- "samsung,exynos5250-usb2-phy"
+	- "samsung,exynos5420-usb2-phy"
 	- "samsung,s5pv210-usb2-phy"
 - reg : a list of registers used by phy driver
 	- first and obligatory is the location of phy modules registers
@@ -82,7 +82,7 @@ resolution is read back from the chip and verified.
 
 Note: Changing the resolution reverts the conversion time to default.
 
-The write-only sysfs entry ``eeprom`` is an alternative for EEPROM operations.
+The write-only sysfs entry ``eeprom_cmd`` is an alternative for EEPROM operations.
 Write ``save`` to save device RAM to EEPROM. Write ``restore`` to restore EEPROM
 data in device RAM.
 
MAINTAINERS
@@ -7741,9 +7741,9 @@ F: drivers/clocksource/h8300_*.c
 F:	drivers/irqchip/irq-renesas-h8*.c
 
 HABANALABS PCI DRIVER
-M:	Oded Gabbay <oded.gabbay@gmail.com>
+M:	Oded Gabbay <ogabbay@kernel.org>
 S:	Supported
-T:	git https://github.com/HabanaAI/linux.git
+T:	git https://git.kernel.org/pub/scm/linux/kernel/git/ogabbay/linux.git
 F:	Documentation/ABI/testing/debugfs-driver-habanalabs
 F:	Documentation/ABI/testing/sysfs-driver-habanalabs
 F:	drivers/misc/habanalabs/
@@ -11171,6 +11171,12 @@ S: Maintained
 F:	Documentation/devicetree/bindings/i2c/i2c-mt7621.txt
 F:	drivers/i2c/busses/i2c-mt7621.c
 
+MEDIATEK MT7621 PHY PCI DRIVER
+M:	Sergio Paracuellos <sergio.paracuellos@gmail.com>
+S:	Maintained
+F:	Documentation/devicetree/bindings/phy/mediatek,mt7621-pci-phy.yaml
+F:	drivers/phy/ralink/phy-mt7621-pci.c
+
 MEDIATEK NAND CONTROLLER DRIVER
 L:	linux-mtd@lists.infradead.org
 S:	Orphan
@@ -14647,6 +14653,14 @@ F: Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml
 F:	drivers/mailbox/qcom-ipcc.c
 F:	include/dt-bindings/mailbox/qcom-ipcc.h
 
+QUALCOMM IPQ4019 USB PHY DRIVER
+M:	Robert Marko <robert.marko@sartura.hr>
+M:	Luka Perkov <luka.perkov@sartura.hr>
+L:	linux-arm-msm@vger.kernel.org
+S:	Maintained
+F:	Documentation/devicetree/bindings/phy/qcom-usb-ipq4019-phy.yaml
+F:	drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c
+
 QUALCOMM IPQ4019 VQMMC REGULATOR DRIVER
 M:	Robert Marko <robert.marko@sartura.hr>
 M:	Luka Perkov <luka.perkov@sartura.hr>
@@ -15508,6 +15522,14 @@ L: linux-fbdev@vger.kernel.org
 S:	Maintained
 F:	drivers/video/fbdev/s3c-fb.c
 
+SAMSUNG INTERCONNECT DRIVERS
+M:	Sylwester Nawrocki <s.nawrocki@samsung.com>
+M:	Artur Świgoń <a.swigon@samsung.com>
+L:	linux-pm@vger.kernel.org
+L:	linux-samsung-soc@vger.kernel.org
+S:	Supported
+F:	drivers/interconnect/samsung/
+
 SAMSUNG LAPTOP DRIVER
 M:	Corentin Chary <corentin.chary@gmail.com>
 L:	platform-driver-x86@vger.kernel.org
@@ -16624,8 +16646,10 @@ F: Documentation/networking/device_drivers/ethernet/toshiba/spider_net.rst
 F:	drivers/net/ethernet/toshiba/spider_net*
 
 SPMI SUBSYSTEM
-R:	Stephen Boyd <sboyd@kernel.org>
-L:	linux-arm-msm@vger.kernel.org
+M:	Stephen Boyd <sboyd@kernel.org>
+L:	linux-kernel@vger.kernel.org
+S:	Maintained
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/sboyd/spmi.git
 F:	Documentation/devicetree/bindings/spmi/
 F:	drivers/spmi/
 F:	include/dt-bindings/spmi/spmi.h
@@ -23,12 +23,15 @@ enum msg_index_t {
 	MSG_OFF = MSG_STATUS_START,
 	MSG_ON,
 	MSG_NO_WINDOW,
-	MSG_CURSORING_OFF,
+
+	/* These must be ordered the same as enum cursor_track */
+	MSG_CURSOR_MSGS_START,
+	MSG_CURSORING_OFF = MSG_CURSOR_MSGS_START,
 	MSG_CURSORING_ON,
 	MSG_HIGHLIGHT_TRACKING,
 	MSG_READ_WINDOW,
 	MSG_READ_ALL,
 
 	MSG_EDIT_DONE,
 	MSG_WINDOW_ALREADY_SET,
 	MSG_END_BEFORE_START,
@@ -41,11 +44,14 @@ enum msg_index_t {
 	MSG_LEAVING_HELP,
 	MSG_IS_UNASSIGNED,
 	MSG_HELP_INFO,
-	MSG_EDGE_TOP,
+
+	/* These must be ordered the same as enum edge */
+	MSG_EDGE_MSGS_START,
+	MSG_EDGE_TOP = MSG_EDGE_MSGS_START,
 	MSG_EDGE_BOTTOM,
 	MSG_EDGE_LEFT,
 	MSG_EDGE_RIGHT,
 
 	MSG_NUMBER,
 	MSG_SPACE,
 	MSG_START, /* A little confusing, given our convention. */
@ -90,19 +90,18 @@ const u_char spk_key_defaults[] = {
|
|||
#include "speakupmap.h"
|
||||
};
|
||||
|
||||
/* Speakup Cursor Track Variables */
|
||||
static int cursor_track = 1, prev_cursor_track = 1;
|
||||
|
||||
/* cursor track modes, must be ordered same as cursor_msgs */
|
||||
enum {
|
||||
/* cursor track modes, must be ordered same as cursor_msgs in enum msg_index_t */
|
||||
enum cursor_track {
|
||||
CT_Off = 0,
|
||||
CT_On,
|
||||
CT_Highlight,
|
||||
CT_Window,
|
||||
CT_Max
|
||||
CT_Max,
|
||||
read_all_mode = CT_Max,
|
||||
};
|
||||
|
||||
#define read_all_mode CT_Max
|
||||
/* Speakup Cursor Track Variables */
|
||||
static enum cursor_track cursor_track = 1, prev_cursor_track = 1;
|
||||
|
||||
static struct tty_struct *tty;
|
||||
|
||||
|
@ -404,15 +403,17 @@ static void say_attributes(struct vc_data *vc)
|
|||
synth_printf("%s\n", spk_msg_get(MSG_COLORS_START + bg));
|
||||
}
|
||||
|
||||
enum {
|
||||
edge_top = 1,
|
||||
/* must be ordered same as edge_msgs in enum msg_index_t */
|
||||
enum edge {
|
||||
edge_none = 0,
|
||||
edge_top,
|
||||
edge_bottom,
|
||||
edge_left,
|
||||
edge_right,
|
||||
edge_quiet
|
||||
};
|
||||
|
||||
static void announce_edge(struct vc_data *vc, int msg_id)
|
||||
static void announce_edge(struct vc_data *vc, enum edge msg_id)
|
||||
{
|
||||
if (spk_bleeps & 1)
|
||||
bleep(spk_y);
|
||||
|
@ -607,7 +608,8 @@ static void say_prev_word(struct vc_data *vc)
|
|||
{
|
||||
u_char temp;
|
||||
u16 ch;
|
||||
u_short edge_said = 0, last_state = 0, state = 0;
|
||||
enum edge edge_said = edge_none;
|
||||
u_short last_state = 0, state = 0;
|
||||
|
||||
spk_parked |= 0x01;
|
||||
|
||||
|
@ -652,7 +654,7 @@ static void say_prev_word(struct vc_data *vc)
|
|||
}
|
||||
if (spk_x == 0 && edge_said == edge_quiet)
|
||||
edge_said = edge_left;
|
||||
if (edge_said > 0 && edge_said < edge_quiet)
|
||||
if (edge_said > edge_none && edge_said < edge_quiet)
|
||||
announce_edge(vc, edge_said);
|
||||
say_word(vc);
|
||||
}
|
||||
|
@ -661,7 +663,8 @@ static void say_next_word(struct vc_data *vc)
|
|||
{
|
||||
u_char temp;
|
||||
u16 ch;
|
||||
u_short edge_said = 0, last_state = 2, state = 0;
|
||||
enum edge edge_said = edge_none;
|
||||
u_short last_state = 2, state = 0;
|
||||
|
||||
spk_parked |= 0x01;
|
||||
if (spk_x == vc->vc_cols - 1 && spk_y == vc->vc_rows - 1) {
|
||||
|
@ -693,7 +696,7 @@ static void say_next_word(struct vc_data *vc)
|
|||
spk_pos += 2;
|
||||
last_state = state;
|
||||
}
|
||||
if (edge_said > 0)
|
||||
if (edge_said > edge_none)
|
||||
announce_edge(vc, edge_said);
|
||||
say_word(vc);
|
||||
}
|
||||
|
@ -1365,31 +1368,30 @@ static void speakup_deallocate(struct vc_data *vc)
|
|||
speakup_console[vc_num] = NULL;
|
||||
}
|
||||
|
||||
static u_char is_cursor;
|
||||
static u_long old_cursor_pos, old_cursor_x, old_cursor_y;
|
||||
static int cursor_con;
|
||||
|
||||
static void reset_highlight_buffers(struct vc_data *);
|
||||
|
||||
static int read_all_key;
|
||||
|
||||
static int in_keyboard_notifier;
|
||||
|
||||
static void start_read_all_timer(struct vc_data *vc, int command);
|
||||
|
||||
enum {
|
||||
RA_NOTHING,
|
||||
RA_NEXT_SENT,
|
||||
RA_PREV_LINE,
|
||||
RA_NEXT_LINE,
|
||||
RA_PREV_SENT,
|
||||
enum read_all_command {
|
||||
RA_NEXT_SENT = KVAL(K_DOWN)+1,
|
||||
RA_PREV_LINE = KVAL(K_LEFT)+1,
|
||||
RA_NEXT_LINE = KVAL(K_RIGHT)+1,
|
||||
RA_PREV_SENT = KVAL(K_UP)+1,
|
||||
RA_DOWN_ARROW,
|
||||
RA_TIMER,
|
||||
RA_FIND_NEXT_SENT,
|
||||
RA_FIND_PREV_SENT,
|
||||
};
|
||||
|
||||
static void kbd_fakekey2(struct vc_data *vc, int command)
|
||||
static u_char is_cursor;
|
||||
static u_long old_cursor_pos, old_cursor_x, old_cursor_y;
|
||||
static int cursor_con;
|
||||
|
||||
static void reset_highlight_buffers(struct vc_data *);
|
||||
|
||||
static enum read_all_command read_all_key;
|
||||
|
||||
static int in_keyboard_notifier;
|
||||
|
||||
static void start_read_all_timer(struct vc_data *vc, enum read_all_command command);
|
||||
|
||||
static void kbd_fakekey2(struct vc_data *vc, enum read_all_command command)
|
||||
{
|
||||
del_timer(&cursor_timer);
|
||||
speakup_fake_down_arrow();
|
||||
|
@ -1426,7 +1428,7 @@ static void stop_read_all(struct vc_data *vc)
|
|||
spk_do_flush();
|
||||
}
|
||||
|
||||
static void start_read_all_timer(struct vc_data *vc, int command)
|
||||
static void start_read_all_timer(struct vc_data *vc, enum read_all_command command)
|
||||
{
|
||||
struct var_t *cursor_timeout;
|
||||
|
||||
|
@ -1437,7 +1439,7 @@ static void start_read_all_timer(struct vc_data *vc, int command)
|
|||
jiffies + msecs_to_jiffies(cursor_timeout->u.n.value));
|
||||
}
|
||||
|
||||
static void handle_cursor_read_all(struct vc_data *vc, int command)
|
||||
static void handle_cursor_read_all(struct vc_data *vc, enum read_all_command command)
|
||||
{
|
||||
int indcount, sentcount, rv, sn;
|
||||
|
||||
|
|
|
@@ -37,7 +37,7 @@ static unsigned char get_index(struct spk_synth *synth);
 static int in_escape;
 static int is_flushing;
 
-static spinlock_t flush_lock;
+static DEFINE_SPINLOCK(flush_lock);
 static DECLARE_WAIT_QUEUE_HEAD(flush);
 
 static struct var_t vars[] = {
@@ -80,6 +80,11 @@ static struct attribute *synth_attrs[] = {
 	NULL,	/* need to NULL terminate the list of attributes */
 };
 
+static void read_buff_add(u_char c)
+{
+	pr_info("speakup_dummy: got character %02x\n", c);
+}
+
 static struct spk_synth synth_dummy = {
 	.name = "dummy",
 	.version = DRV_VERSION,
@@ -103,7 +108,7 @@ static struct spk_synth synth_dummy = {
 	.flush = spk_synth_flush,
 	.is_alive = spk_synth_is_alive_restart,
 	.synth_adjust = NULL,
-	.read_buff_add = NULL,
+	.read_buff_add = read_buff_add,
 	.get_index = NULL,
 	.indexing = {
 		.command = NULL,
@ -68,11 +68,9 @@
|
|||
#include <linux/sizes.h>
|
||||
|
||||
#include <uapi/linux/android/binder.h>
|
||||
#include <uapi/linux/android/binderfs.h>
|
||||
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
#include "binder_alloc.h"
|
||||
#include "binder_internal.h"
|
||||
#include "binder_trace.h"
|
||||
|
||||
|
@ -160,24 +158,6 @@ module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
|
|||
#define to_binder_fd_array_object(hdr) \
|
||||
container_of(hdr, struct binder_fd_array_object, hdr)
|
||||
|
||||
enum binder_stat_types {
|
||||
BINDER_STAT_PROC,
|
||||
BINDER_STAT_THREAD,
|
||||
BINDER_STAT_NODE,
|
||||
BINDER_STAT_REF,
|
||||
BINDER_STAT_DEATH,
|
||||
BINDER_STAT_TRANSACTION,
|
||||
BINDER_STAT_TRANSACTION_COMPLETE,
|
||||
BINDER_STAT_COUNT
|
||||
};
|
||||
|
||||
struct binder_stats {
|
||||
atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
|
||||
atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
|
||||
atomic_t obj_created[BINDER_STAT_COUNT];
|
||||
atomic_t obj_deleted[BINDER_STAT_COUNT];
|
||||
};
|
||||
|
||||
static struct binder_stats binder_stats;
|
||||
|
||||
static inline void binder_stats_deleted(enum binder_stat_types type)
|
||||
|
@ -213,278 +193,11 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
|
|||
return e;
|
||||
}
|
||||
|
||||
/**
|
||||
* struct binder_work - work enqueued on a worklist
|
||||
* @entry: node enqueued on list
|
||||
* @type: type of work to be performed
|
||||
*
|
||||
* There are separate work lists for proc, thread, and node (async).
|
||||
*/
|
||||
struct binder_work {
|
||||
struct list_head entry;
|
||||
|
||||
enum binder_work_type {
|
||||
BINDER_WORK_TRANSACTION = 1,
|
||||
BINDER_WORK_TRANSACTION_COMPLETE,
|
||||
BINDER_WORK_RETURN_ERROR,
|
||||
BINDER_WORK_NODE,
|
||||
BINDER_WORK_DEAD_BINDER,
|
||||
BINDER_WORK_DEAD_BINDER_AND_CLEAR,
|
||||
BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
|
||||
} type;
|
||||
};
|
||||
|
||||
struct binder_error {
|
||||
struct binder_work work;
|
||||
uint32_t cmd;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct binder_node - binder node bookkeeping
|
||||
* @debug_id: unique ID for debugging
|
||||
* (invariant after initialized)
|
||||
* @lock: lock for node fields
|
||||
* @work: worklist element for node work
|
||||
* (protected by @proc->inner_lock)
|
||||
* @rb_node: element for proc->nodes tree
|
||||
* (protected by @proc->inner_lock)
|
||||
* @dead_node: element for binder_dead_nodes list
|
||||
* (protected by binder_dead_nodes_lock)
|
||||
* @proc: binder_proc that owns this node
|
||||
* (invariant after initialized)
|
||||
* @refs: list of references on this node
|
||||
* (protected by @lock)
|
||||
* @internal_strong_refs: used to take strong references when
|
||||
* initiating a transaction
|
||||
* (protected by @proc->inner_lock if @proc
|
||||
* and by @lock)
|
||||
* @local_weak_refs: weak user refs from local process
|
||||
* (protected by @proc->inner_lock if @proc
|
||||
* and by @lock)
|
||||
* @local_strong_refs: strong user refs from local process
|
||||
* (protected by @proc->inner_lock if @proc
|
||||
* and by @lock)
|
||||
* @tmp_refs: temporary kernel refs
|
||||
* (protected by @proc->inner_lock while @proc
|
||||
* is valid, and by binder_dead_nodes_lock
|
||||
* if @proc is NULL. During inc/dec and node release
|
||||
* it is also protected by @lock to provide safety
|
||||
* as the node dies and @proc becomes NULL)
|
||||
* @ptr: userspace pointer for node
|
||||
* (invariant, no lock needed)
|
||||
* @cookie: userspace cookie for node
|
||||
* (invariant, no lock needed)
|
||||
* @has_strong_ref: userspace notified of strong ref
|
||||
* (protected by @proc->inner_lock if @proc
|
||||
* and by @lock)
|
||||
* @pending_strong_ref: userspace has acked notification of strong ref
|
||||
* (protected by @proc->inner_lock if @proc
|
||||
* and by @lock)
|
||||
* @has_weak_ref: userspace notified of weak ref
|
||||
* (protected by @proc->inner_lock if @proc
|
||||
* and by @lock)
|
||||
* @pending_weak_ref: userspace has acked notification of weak ref
|
||||
* (protected by @proc->inner_lock if @proc
|
||||
* and by @lock)
|
||||
* @has_async_transaction: async transaction to node in progress
|
||||
* (protected by @lock)
|
||||
* @accept_fds: file descriptor operations supported for node
|
||||
* (invariant after initialized)
|
||||
* @min_priority: minimum scheduling priority
|
||||
* (invariant after initialized)
|
||||
* @txn_security_ctx: require sender's security context
|
||||
* (invariant after initialized)
|
||||
* @async_todo: list of async work items
|
||||
* (protected by @proc->inner_lock)
|
||||
*
|
||||
* Bookkeeping structure for binder nodes.
|
||||
*/
|
||||
struct binder_node {
|
||||
int debug_id;
|
||||
spinlock_t lock;
|
||||
struct binder_work work;
|
||||
union {
|
||||
struct rb_node rb_node;
|
||||
struct hlist_node dead_node;
|
||||
};
|
||||
struct binder_proc *proc;
|
||||
struct hlist_head refs;
|
||||
int internal_strong_refs;
|
||||
int local_weak_refs;
|
||||
int local_strong_refs;
|
||||
int tmp_refs;
|
||||
binder_uintptr_t ptr;
|
||||
binder_uintptr_t cookie;
|
||||
struct {
|
||||
/*
|
||||
* bitfield elements protected by
|
||||
* proc inner_lock
|
||||
*/
|
||||
u8 has_strong_ref:1;
|
||||
u8 pending_strong_ref:1;
|
||||
u8 has_weak_ref:1;
|
||||
u8 pending_weak_ref:1;
|
||||
};
|
||||
struct {
|
||||
/*
|
||||
* invariant after initialization
|
||||
*/
|
||||
u8 accept_fds:1;
|
||||
u8 txn_security_ctx:1;
|
||||
u8 min_priority;
|
||||
};
|
||||
bool has_async_transaction;
|
||||
struct list_head async_todo;
|
||||
};
|
||||
|
||||
struct binder_ref_death {
|
||||
/**
|
||||
* @work: worklist element for death notifications
|
||||
* (protected by inner_lock of the proc that
|
||||
* this ref belongs to)
|
||||
*/
|
||||
struct binder_work work;
|
||||
binder_uintptr_t cookie;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct binder_ref_data - binder_ref counts and id
|
||||
* @debug_id: unique ID for the ref
|
||||
* @desc: unique userspace handle for ref
|
||||
* @strong: strong ref count (debugging only if not locked)
|
||||
* @weak: weak ref count (debugging only if not locked)
|
||||
*
|
||||
* Structure to hold ref count and ref id information. Since
|
||||
* the actual ref can only be accessed with a lock, this structure
|
||||
* is used to return information about the ref to callers of
|
||||
* ref inc/dec functions.
|
||||
*/
|
||||
struct binder_ref_data {
|
||||
int debug_id;
|
||||
uint32_t desc;
|
||||
int strong;
|
||||
int weak;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct binder_ref - struct to track references on nodes
|
||||
* @data: binder_ref_data containing id, handle, and current refcounts
|
||||
* @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
|
||||
* @rb_node_node: node for lookup by @node in proc's rb_tree
|
||||
* @node_entry: list entry for node->refs list in target node
|
||||
* (protected by @node->lock)
|
||||
* @proc: binder_proc containing ref
|
||||
* @node: binder_node of target node. When cleaning up a
|
||||
* ref for deletion in binder_cleanup_ref, a non-NULL
|
||||
* @node indicates the node must be freed
|
||||
* @death: pointer to death notification (ref_death) if requested
|
||||
* (protected by @node->lock)
|
||||
*
|
||||
* Structure to track references from procA to target node (on procB). This
|
||||
* structure is unsafe to access without holding @proc->outer_lock.
|
||||
*/
|
||||
struct binder_ref {
|
||||
/* Lookups needed: */
|
||||
/* node + proc => ref (transaction) */
|
||||
/* desc + proc => ref (transaction, inc/dec ref) */
|
||||
/* node => refs + procs (proc exit) */
|
||||
struct binder_ref_data data;
|
||||
struct rb_node rb_node_desc;
|
||||
struct rb_node rb_node_node;
|
||||
struct hlist_node node_entry;
|
||||
struct binder_proc *proc;
|
||||
struct binder_node *node;
|
||||
struct binder_ref_death *death;
|
||||
};
|
||||
|
||||
enum binder_deferred_state {
|
||||
BINDER_DEFERRED_FLUSH = 0x01,
|
||||
BINDER_DEFERRED_RELEASE = 0x02,
|
||||
};
|
||||
|
||||
/**
|
||||
* struct binder_proc - binder process bookkeeping
|
||||
* @proc_node: element for binder_procs list
|
||||
* @threads: rbtree of binder_threads in this proc
|
||||
* (protected by @inner_lock)
|
||||
* @nodes: rbtree of binder nodes associated with
|
||||
* this proc ordered by node->ptr
|
||||
* (protected by @inner_lock)
|
||||
* @refs_by_desc: rbtree of refs ordered by ref->desc
|
||||
* (protected by @outer_lock)
|
||||
* @refs_by_node: rbtree of refs ordered by ref->node
|
||||
* (protected by @outer_lock)
|
||||
* @waiting_threads: threads currently waiting for proc work
|
||||
* (protected by @inner_lock)
|
||||
* @pid PID of group_leader of process
|
||||
* (invariant after initialized)
|
||||
* @tsk task_struct for group_leader of process
|
||||
* (invariant after initialized)
|
||||
* @deferred_work_node: element for binder_deferred_list
|
||||
* (protected by binder_deferred_lock)
|
||||
* @deferred_work: bitmap of deferred work to perform
|
||||
* (protected by binder_deferred_lock)
|
||||
* @is_dead: process is dead and awaiting free
|
||||
* when outstanding transactions are cleaned up
|
||||
* (protected by @inner_lock)
|
||||
* @todo: list of work for this process
|
||||
* (protected by @inner_lock)
|
||||
* @stats: per-process binder statistics
|
||||
* (atomics, no lock needed)
|
||||
* @delivered_death: list of delivered death notification
|
||||
* (protected by @inner_lock)
|
||||
* @max_threads: cap on number of binder threads
|
||||
* (protected by @inner_lock)
|
||||
* @requested_threads: number of binder threads requested but not
|
||||
* yet started. In current implementation, can
|
||||
* only be 0 or 1.
|
||||
* (protected by @inner_lock)
|
||||
* @requested_threads_started: number binder threads started
|
||||
* (protected by @inner_lock)
|
||||
* @tmp_ref: temporary reference to indicate proc is in use
|
||||
* (protected by @inner_lock)
|
||||
* @default_priority: default scheduler priority
|
||||
* (invariant after initialized)
|
||||
* @debugfs_entry: debugfs node
|
||||
* @alloc: binder allocator bookkeeping
|
||||
* @context: binder_context for this proc
|
||||
* (invariant after initialized)
|
||||
* @inner_lock: can nest under outer_lock and/or node lock
|
||||
* @outer_lock: no nesting under innor or node lock
|
||||
* Lock order: 1) outer, 2) node, 3) inner
|
||||
* @binderfs_entry: process-specific binderfs log file
|
||||
*
|
||||
* Bookkeeping structure for binder processes
|
||||
*/
|
||||
struct binder_proc {
|
||||
struct hlist_node proc_node;
|
||||
struct rb_root threads;
|
||||
struct rb_root nodes;
|
||||
struct rb_root refs_by_desc;
|
||||
struct rb_root refs_by_node;
|
||||
struct list_head waiting_threads;
|
||||
int pid;
|
||||
struct task_struct *tsk;
|
||||
struct hlist_node deferred_work_node;
|
||||
int deferred_work;
|
||||
bool is_dead;
|
||||
|
||||
struct list_head todo;
|
||||
struct binder_stats stats;
|
||||
struct list_head delivered_death;
|
||||
int max_threads;
|
||||
int requested_threads;
|
||||
int requested_threads_started;
|
||||
int tmp_ref;
|
||||
long default_priority;
|
||||
struct dentry *debugfs_entry;
|
||||
struct binder_alloc alloc;
|
||||
struct binder_context *context;
|
||||
spinlock_t inner_lock;
|
||||
spinlock_t outer_lock;
|
||||
struct dentry *binderfs_entry;
|
||||
};
|
||||
|
||||
enum {
|
||||
BINDER_LOOPER_STATE_REGISTERED = 0x01,
|
||||
BINDER_LOOPER_STATE_ENTERED = 0x02,
|
||||
|
@ -494,125 +207,6 @@ enum {
|
|||
BINDER_LOOPER_STATE_POLL = 0x20,
|
||||
};
|
||||
|
||||
/**
|
||||
* struct binder_thread - binder thread bookkeeping
|
||||
* @proc: binder process for this thread
|
||||
* (invariant after initialization)
|
||||
* @rb_node: element for proc->threads rbtree
|
||||
* (protected by @proc->inner_lock)
|
||||
* @waiting_thread_node: element for @proc->waiting_threads list
|
||||
* (protected by @proc->inner_lock)
|
||||
* @pid: PID for this thread
|
||||
* (invariant after initialization)
|
||||
* @looper: bitmap of looping state
|
||||
* (only accessed by this thread)
|
||||
* @looper_needs_return: looping thread needs to exit driver
|
||||
* (no lock needed)
|
||||
* @transaction_stack: stack of in-progress transactions for this thread
|
||||
* (protected by @proc->inner_lock)
|
||||
* @todo: list of work to do for this thread
|
||||
* (protected by @proc->inner_lock)
|
||||
* @process_todo: whether work in @todo should be processed
|
||||
* (protected by @proc->inner_lock)
|
||||
* @return_error: transaction errors reported by this thread
|
||||
* (only accessed by this thread)
|
||||
* @reply_error: transaction errors reported by target thread
|
||||
* (protected by @proc->inner_lock)
|
||||
* @wait: wait queue for thread work
|
||||
* @stats: per-thread statistics
|
||||
* (atomics, no lock needed)
|
||||
* @tmp_ref: temporary reference to indicate thread is in use
|
||||
* (atomic since @proc->inner_lock cannot
|
||||
* always be acquired)
|
||||
* @is_dead: thread is dead and awaiting free
|
||||
* when outstanding transactions are cleaned up
|
||||
* (protected by @proc->inner_lock)
|
||||
*
|
||||
* Bookkeeping structure for binder threads.
|
||||
*/
|
||||
struct binder_thread {
|
||||
struct binder_proc *proc;
|
||||
struct rb_node rb_node;
|
||||
struct list_head waiting_thread_node;
|
||||
int pid;
|
||||
int looper; /* only modified by this thread */
|
||||
bool looper_need_return; /* can be written by other thread */
|
||||
struct binder_transaction *transaction_stack;
|
||||
struct list_head todo;
|
||||
bool process_todo;
|
||||
struct binder_error return_error;
|
||||
struct binder_error reply_error;
|
||||
wait_queue_head_t wait;
|
||||
struct binder_stats stats;
|
||||
atomic_t tmp_ref;
|
||||
bool is_dead;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct binder_txn_fd_fixup - transaction fd fixup list element
|
||||
* @fixup_entry: list entry
|
||||
* @file: struct file to be associated with new fd
|
||||
* @offset: offset in buffer data to this fixup
|
||||
*
|
||||
* List element for fd fixups in a transaction. Since file
|
||||
* descriptors need to be allocated in the context of the
|
||||
* target process, we pass each fd to be processed in this
|
||||
* struct.
|
||||
*/
|
||||
struct binder_txn_fd_fixup {
|
||||
struct list_head fixup_entry;
|
||||
struct file *file;
|
||||
size_t offset;
|
||||
};
|
||||
|
||||
struct binder_transaction {
|
||||
int debug_id;
|
||||
struct binder_work work;
|
||||
struct binder_thread *from;
|
||||
struct binder_transaction *from_parent;
|
||||
struct binder_proc *to_proc;
|
||||
struct binder_thread *to_thread;
|
||||
struct binder_transaction *to_parent;
|
||||
unsigned need_reply:1;
|
||||
/* unsigned is_dead:1; */ /* not used at the moment */
|
||||
|
||||
struct binder_buffer *buffer;
|
||||
unsigned int code;
|
||||
unsigned int flags;
|
||||
long priority;
|
||||
long saved_priority;
|
||||
kuid_t sender_euid;
|
||||
struct list_head fd_fixups;
|
||||
binder_uintptr_t security_ctx;
|
||||
/**
|
||||
* @lock: protects @from, @to_proc, and @to_thread
|
||||
*
|
||||
* @from, @to_proc, and @to_thread can be set to NULL
|
||||
* during thread teardown
|
||||
*/
|
||||
spinlock_t lock;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct binder_object - union of flat binder object types
|
||||
* @hdr: generic object header
|
||||
* @fbo: binder object (nodes and refs)
|
||||
* @fdo: file descriptor object
|
||||
* @bbo: binder buffer pointer
|
||||
* @fdao: file descriptor array
|
||||
*
|
||||
* Used for type-independent object copies
|
||||
*/
|
||||
struct binder_object {
|
||||
union {
|
||||
struct binder_object_header hdr;
|
||||
struct flat_binder_object fbo;
|
||||
struct binder_fd_object fdo;
|
||||
struct binder_buffer_object bbo;
|
||||
struct binder_fd_array_object fdao;
|
||||
};
|
||||
};
|
||||
|
||||
/**
|
||||
* binder_proc_lock() - Acquire outer lock for given binder_proc
|
||||
* @proc: struct binder_proc to acquire
|
||||
|
@@ -1892,6 +1486,20 @@ static void binder_free_txn_fixups(struct binder_transaction *t)
	}
}

static void binder_txn_latency_free(struct binder_transaction *t)
{
	int from_proc, from_thread, to_proc, to_thread;

	spin_lock(&t->lock);
	from_proc = t->from ? t->from->proc->pid : 0;
	from_thread = t->from ? t->from->pid : 0;
	to_proc = t->to_proc ? t->to_proc->pid : 0;
	to_thread = t->to_thread ? t->to_thread->pid : 0;
	spin_unlock(&t->lock);

	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
}
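Aside, not part of the diff: binder_txn_latency_free() snapshots @from, @to_proc and @to_thread while holding t->lock because those pointers can be cleared during thread teardown (see the @lock comment in struct binder_transaction), and only then emits the tracepoint outside the lock. A minimal user-space analogue of that snapshot-under-lock idiom, with hypothetical names, looks like this:

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for a transaction whose endpoints may be torn down
 * concurrently; only the idea is borrowed from the driver. */
struct txn {
	pthread_mutex_t lock;
	int from_pid;	/* another thread may reset these during teardown */
	int to_pid;
};

static void report_latency(struct txn *t)
{
	int from, to;

	/* take a consistent snapshot while holding the lock ... */
	pthread_mutex_lock(&t->lock);
	from = t->from_pid;
	to = t->to_pid;
	pthread_mutex_unlock(&t->lock);

	/* ... and do the slow reporting (tracing) outside of it */
	printf("txn from %d to %d\n", from, to);
}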
|
||||
|
||||
static void binder_free_transaction(struct binder_transaction *t)
|
||||
{
|
||||
struct binder_proc *target_proc = t->to_proc;
|
||||
|
@ -1902,6 +1510,8 @@ static void binder_free_transaction(struct binder_transaction *t)
|
|||
t->buffer->transaction = NULL;
|
||||
binder_inner_proc_unlock(target_proc);
|
||||
}
|
||||
if (trace_binder_txn_latency_free_enabled())
|
||||
binder_txn_latency_free(t);
|
||||
/*
|
||||
* If the transaction has no target_proc, then
|
||||
* t->buffer->transaction has already been cleared.
|
||||
|
@ -3103,7 +2713,7 @@ static void binder_transaction(struct binder_proc *proc,
|
|||
if (extra_buffers_size < added_size) {
|
||||
/* integer overflow of extra_buffers_size */
|
||||
return_error = BR_FAILED_REPLY;
|
||||
return_error_param = EINVAL;
|
||||
return_error_param = -EINVAL;
|
||||
return_error_line = __LINE__;
|
||||
goto err_bad_extra_size;
|
||||
}
|
||||
|
@ -3146,6 +2756,7 @@ static void binder_transaction(struct binder_proc *proc,
|
|||
t->buffer->debug_id = t->debug_id;
|
||||
t->buffer->transaction = t;
|
||||
t->buffer->target_node = target_node;
|
||||
t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
|
||||
trace_binder_transaction_alloc_buf(t->buffer);
|
||||
|
||||
if (binder_alloc_copy_user_to_buffer(
|
||||
|
@ -3479,6 +3090,8 @@ err_get_secctx_failed:
|
|||
kfree(tcomplete);
|
||||
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
|
||||
err_alloc_tcomplete_failed:
|
||||
if (trace_binder_txn_latency_free_enabled())
|
||||
binder_txn_latency_free(t);
|
||||
kfree(t);
|
||||
binder_stats_deleted(BINDER_STAT_TRANSACTION);
|
||||
err_alloc_t_failed:
|
||||
|
@ -3614,6 +3227,7 @@ static int binder_thread_write(struct binder_proc *proc,
|
|||
ret = -1;
|
||||
if (increment && !target) {
|
||||
struct binder_node *ctx_mgr_node;
|
||||
|
||||
mutex_lock(&context->context_mgr_node_lock);
|
||||
ctx_mgr_node = context->binder_context_mgr_node;
|
||||
if (ctx_mgr_node) {
|
||||
|
|
|
@@ -696,6 +696,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
	binder_insert_free_buffer(alloc, buffer);
}

static void binder_alloc_clear_buf(struct binder_alloc *alloc,
				   struct binder_buffer *buffer);
/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc: binder_alloc for this proc
@@ -706,6 +708,18 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	/*
	 * We could eliminate the call to binder_alloc_clear_buf()
	 * from binder_alloc_deferred_release() by moving this to
	 * binder_alloc_free_buf_locked(). However, that could
	 * increase contention for the alloc mutex if clear_on_free
	 * is used frequently for large buffers. The mutex is not
	 * needed for correctness here.
	 */
	if (buffer->clear_on_free) {
		binder_alloc_clear_buf(alloc, buffer);
		buffer->clear_on_free = false;
	}
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
|
||||
|
@ -802,6 +816,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
|
|||
/* Transaction should already have been freed */
|
||||
BUG_ON(buffer->transaction);
|
||||
|
||||
if (buffer->clear_on_free) {
|
||||
binder_alloc_clear_buf(alloc, buffer);
|
||||
buffer->clear_on_free = false;
|
||||
}
|
||||
binder_free_buf_locked(alloc, buffer);
|
||||
buffers++;
|
||||
}
|
||||
|
@ -1135,6 +1153,36 @@ static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
|
|||
return lru_page->page_ptr;
|
||||
}
|
||||
|
||||
/**
|
||||
* binder_alloc_clear_buf() - zero out buffer
|
||||
* @alloc: binder_alloc for this proc
|
||||
* @buffer: binder buffer to be cleared
|
||||
*
|
||||
* memset the given buffer to 0
|
||||
*/
|
||||
static void binder_alloc_clear_buf(struct binder_alloc *alloc,
|
||||
struct binder_buffer *buffer)
|
||||
{
|
||||
size_t bytes = binder_alloc_buffer_size(alloc, buffer);
|
||||
binder_size_t buffer_offset = 0;
|
||||
|
||||
while (bytes) {
|
||||
unsigned long size;
|
||||
struct page *page;
|
||||
pgoff_t pgoff;
|
||||
void *kptr;
|
||||
|
||||
page = binder_alloc_get_page(alloc, buffer,
|
||||
buffer_offset, &pgoff);
|
||||
size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
|
||||
kptr = kmap(page) + pgoff;
|
||||
memset(kptr, 0, size);
|
||||
kunmap(page);
|
||||
bytes -= size;
|
||||
buffer_offset += size;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* binder_alloc_copy_user_to_buffer() - copy src user to tgt user
|
||||
* @alloc: binder_alloc for this proc
|
||||
|
|
|
@ -23,6 +23,7 @@ struct binder_transaction;
|
|||
* @entry: entry alloc->buffers
|
||||
* @rb_node: node for allocated_buffers/free_buffers rb trees
|
||||
* @free: %true if buffer is free
|
||||
* @clear_on_free: %true if buffer must be zeroed after use
|
||||
* @allow_user_free: %true if user is allowed to free buffer
|
||||
* @async_transaction: %true if buffer is in use for an async txn
|
||||
* @debug_id: unique ID for debugging
|
||||
|
@ -41,9 +42,10 @@ struct binder_buffer {
|
|||
struct rb_node rb_node; /* free entry by size or allocated entry */
|
||||
/* by address */
|
||||
unsigned free:1;
|
||||
unsigned clear_on_free:1;
|
||||
unsigned allow_user_free:1;
|
||||
unsigned async_transaction:1;
|
||||
unsigned debug_id:29;
|
||||
unsigned debug_id:28;
|
||||
|
||||
struct binder_transaction *transaction;
|
||||
|
||||
|
|
|
@ -12,6 +12,8 @@
|
|||
#include <linux/stddef.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/uidgid.h>
|
||||
#include <uapi/linux/android/binderfs.h>
|
||||
#include "binder_alloc.h"
|
||||
|
||||
struct binder_context {
|
||||
struct binder_node *binder_context_mgr_node;
|
||||
|
@ -141,6 +143,410 @@ struct binder_transaction_log {
|
|||
struct binder_transaction_log_entry entry[32];
|
||||
};
|
||||
|
||||
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};
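Aside, not from the diff: the br[] and bc[] arrays are indexed by the ioctl ordinal of the BR_*/BC_* codes, which is why they are sized with _IOC_NR() of the last code in each range. A hedged sketch of how a command counter would be bumped (the helper name is hypothetical; the driver has its own wrappers):

static inline void binder_stats_count_bc(struct binder_stats *stats,
					 unsigned int cmd)
{
	/* cmd is a BC_* code such as BC_TRANSACTION; _IOC_NR() extracts its
	 * ordinal so it can index the bc[] array directly */
	if (_IOC_NR(cmd) < ARRAY_SIZE(stats->bc))
		atomic_inc(&stats->bc[_IOC_NR(cmd)]);
}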
|
||||
|
||||
/**
|
||||
* struct binder_work - work enqueued on a worklist
|
||||
* @entry: node enqueued on list
|
||||
* @type: type of work to be performed
|
||||
*
|
||||
* There are separate work lists for proc, thread, and node (async).
|
||||
*/
|
||||
struct binder_work {
|
||||
struct list_head entry;
|
||||
|
||||
enum binder_work_type {
|
||||
BINDER_WORK_TRANSACTION = 1,
|
||||
BINDER_WORK_TRANSACTION_COMPLETE,
|
||||
BINDER_WORK_RETURN_ERROR,
|
||||
BINDER_WORK_NODE,
|
||||
BINDER_WORK_DEAD_BINDER,
|
||||
BINDER_WORK_DEAD_BINDER_AND_CLEAR,
|
||||
BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
|
||||
} type;
|
||||
};
|
||||
|
||||
struct binder_error {
|
||||
struct binder_work work;
|
||||
uint32_t cmd;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct binder_node - binder node bookkeeping
|
||||
* @debug_id: unique ID for debugging
|
||||
* (invariant after initialized)
|
||||
* @lock: lock for node fields
|
||||
* @work: worklist element for node work
|
||||
* (protected by @proc->inner_lock)
|
||||
* @rb_node: element for proc->nodes tree
|
||||
* (protected by @proc->inner_lock)
|
||||
* @dead_node: element for binder_dead_nodes list
|
||||
* (protected by binder_dead_nodes_lock)
|
||||
* @proc: binder_proc that owns this node
|
||||
* (invariant after initialized)
|
||||
* @refs: list of references on this node
|
||||
* (protected by @lock)
|
||||
* @internal_strong_refs: used to take strong references when
|
||||
* initiating a transaction
|
||||
* (protected by @proc->inner_lock if @proc
|
||||
* and by @lock)
|
||||
* @local_weak_refs: weak user refs from local process
|
||||
* (protected by @proc->inner_lock if @proc
|
||||
* and by @lock)
|
||||
* @local_strong_refs: strong user refs from local process
|
||||
* (protected by @proc->inner_lock if @proc
|
||||
* and by @lock)
|
||||
* @tmp_refs: temporary kernel refs
|
||||
* (protected by @proc->inner_lock while @proc
|
||||
* is valid, and by binder_dead_nodes_lock
|
||||
* if @proc is NULL. During inc/dec and node release
|
||||
* it is also protected by @lock to provide safety
|
||||
* as the node dies and @proc becomes NULL)
|
||||
* @ptr: userspace pointer for node
|
||||
* (invariant, no lock needed)
|
||||
* @cookie: userspace cookie for node
|
||||
* (invariant, no lock needed)
|
||||
* @has_strong_ref: userspace notified of strong ref
|
||||
* (protected by @proc->inner_lock if @proc
|
||||
* and by @lock)
|
||||
* @pending_strong_ref: userspace has acked notification of strong ref
|
||||
* (protected by @proc->inner_lock if @proc
|
||||
* and by @lock)
|
||||
* @has_weak_ref: userspace notified of weak ref
|
||||
* (protected by @proc->inner_lock if @proc
|
||||
* and by @lock)
|
||||
* @pending_weak_ref: userspace has acked notification of weak ref
|
||||
* (protected by @proc->inner_lock if @proc
|
||||
* and by @lock)
|
||||
* @has_async_transaction: async transaction to node in progress
|
||||
* (protected by @lock)
|
||||
* @accept_fds: file descriptor operations supported for node
|
||||
* (invariant after initialized)
|
||||
* @min_priority: minimum scheduling priority
|
||||
* (invariant after initialized)
|
||||
* @txn_security_ctx: require sender's security context
|
||||
* (invariant after initialized)
|
||||
* @async_todo: list of async work items
|
||||
* (protected by @proc->inner_lock)
|
||||
*
|
||||
* Bookkeeping structure for binder nodes.
|
||||
*/
|
||||
struct binder_node {
|
||||
int debug_id;
|
||||
spinlock_t lock;
|
||||
struct binder_work work;
|
||||
union {
|
||||
struct rb_node rb_node;
|
||||
struct hlist_node dead_node;
|
||||
};
|
||||
struct binder_proc *proc;
|
||||
struct hlist_head refs;
|
||||
int internal_strong_refs;
|
||||
int local_weak_refs;
|
||||
int local_strong_refs;
|
||||
int tmp_refs;
|
||||
binder_uintptr_t ptr;
|
||||
binder_uintptr_t cookie;
|
||||
struct {
|
||||
/*
|
||||
* bitfield elements protected by
|
||||
* proc inner_lock
|
||||
*/
|
||||
u8 has_strong_ref:1;
|
||||
u8 pending_strong_ref:1;
|
||||
u8 has_weak_ref:1;
|
||||
u8 pending_weak_ref:1;
|
||||
};
|
||||
struct {
|
||||
/*
|
||||
* invariant after initialization
|
||||
*/
|
||||
u8 accept_fds:1;
|
||||
u8 txn_security_ctx:1;
|
||||
u8 min_priority;
|
||||
};
|
||||
bool has_async_transaction;
|
||||
struct list_head async_todo;
|
||||
};
|
||||
|
||||
struct binder_ref_death {
|
||||
/**
|
||||
* @work: worklist element for death notifications
|
||||
* (protected by inner_lock of the proc that
|
||||
* this ref belongs to)
|
||||
*/
|
||||
struct binder_work work;
|
||||
binder_uintptr_t cookie;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct binder_ref_data - binder_ref counts and id
|
||||
* @debug_id: unique ID for the ref
|
||||
* @desc: unique userspace handle for ref
|
||||
* @strong: strong ref count (debugging only if not locked)
|
||||
* @weak: weak ref count (debugging only if not locked)
|
||||
*
|
||||
* Structure to hold ref count and ref id information. Since
|
||||
* the actual ref can only be accessed with a lock, this structure
|
||||
* is used to return information about the ref to callers of
|
||||
* ref inc/dec functions.
|
||||
*/
|
||||
struct binder_ref_data {
|
||||
int debug_id;
|
||||
uint32_t desc;
|
||||
int strong;
|
||||
int weak;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct binder_ref - struct to track references on nodes
|
||||
* @data: binder_ref_data containing id, handle, and current refcounts
|
||||
* @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
|
||||
* @rb_node_node: node for lookup by @node in proc's rb_tree
|
||||
* @node_entry: list entry for node->refs list in target node
|
||||
* (protected by @node->lock)
|
||||
* @proc: binder_proc containing ref
|
||||
* @node: binder_node of target node. When cleaning up a
|
||||
* ref for deletion in binder_cleanup_ref, a non-NULL
|
||||
* @node indicates the node must be freed
|
||||
* @death: pointer to death notification (ref_death) if requested
|
||||
* (protected by @node->lock)
|
||||
*
|
||||
* Structure to track references from procA to target node (on procB). This
|
||||
* structure is unsafe to access without holding @proc->outer_lock.
|
||||
*/
|
||||
struct binder_ref {
|
||||
/* Lookups needed: */
|
||||
/* node + proc => ref (transaction) */
|
||||
/* desc + proc => ref (transaction, inc/dec ref) */
|
||||
/* node => refs + procs (proc exit) */
|
||||
struct binder_ref_data data;
|
||||
struct rb_node rb_node_desc;
|
||||
struct rb_node rb_node_node;
|
||||
struct hlist_node node_entry;
|
||||
struct binder_proc *proc;
|
||||
struct binder_node *node;
|
||||
struct binder_ref_death *death;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct binder_proc - binder process bookkeeping
|
||||
* @proc_node: element for binder_procs list
|
||||
* @threads: rbtree of binder_threads in this proc
|
||||
* (protected by @inner_lock)
|
||||
* @nodes: rbtree of binder nodes associated with
|
||||
* this proc ordered by node->ptr
|
||||
* (protected by @inner_lock)
|
||||
* @refs_by_desc: rbtree of refs ordered by ref->desc
|
||||
* (protected by @outer_lock)
|
||||
* @refs_by_node: rbtree of refs ordered by ref->node
|
||||
* (protected by @outer_lock)
|
||||
* @waiting_threads: threads currently waiting for proc work
|
||||
* (protected by @inner_lock)
|
||||
* @pid: PID of group_leader of process
|
||||
* (invariant after initialized)
|
||||
* @tsk: task_struct for group_leader of process
|
||||
* (invariant after initialized)
|
||||
* @deferred_work_node: element for binder_deferred_list
|
||||
* (protected by binder_deferred_lock)
|
||||
* @deferred_work: bitmap of deferred work to perform
|
||||
* (protected by binder_deferred_lock)
|
||||
* @is_dead: process is dead and awaiting free
|
||||
* when outstanding transactions are cleaned up
|
||||
* (protected by @inner_lock)
|
||||
* @todo: list of work for this process
|
||||
* (protected by @inner_lock)
|
||||
* @stats: per-process binder statistics
|
||||
* (atomics, no lock needed)
|
||||
* @delivered_death: list of delivered death notifications
|
||||
* (protected by @inner_lock)
|
||||
* @max_threads: cap on number of binder threads
|
||||
* (protected by @inner_lock)
|
||||
* @requested_threads: number of binder threads requested but not
|
||||
* yet started. In current implementation, can
|
||||
* only be 0 or 1.
|
||||
* (protected by @inner_lock)
|
||||
* @requested_threads_started: number of binder threads started
|
||||
* (protected by @inner_lock)
|
||||
* @tmp_ref: temporary reference to indicate proc is in use
|
||||
* (protected by @inner_lock)
|
||||
* @default_priority: default scheduler priority
|
||||
* (invariant after initialized)
|
||||
* @debugfs_entry: debugfs node
|
||||
* @alloc: binder allocator bookkeeping
|
||||
* @context: binder_context for this proc
|
||||
* (invariant after initialized)
|
||||
* @inner_lock: can nest under outer_lock and/or node lock
|
||||
* @outer_lock: no nesting under inner or node lock
|
||||
* Lock order: 1) outer, 2) node, 3) inner
|
||||
* @binderfs_entry: process-specific binderfs log file
|
||||
*
|
||||
* Bookkeeping structure for binder processes
|
||||
*/
|
||||
struct binder_proc {
|
||||
struct hlist_node proc_node;
|
||||
struct rb_root threads;
|
||||
struct rb_root nodes;
|
||||
struct rb_root refs_by_desc;
|
||||
struct rb_root refs_by_node;
|
||||
struct list_head waiting_threads;
|
||||
int pid;
|
||||
struct task_struct *tsk;
|
||||
struct hlist_node deferred_work_node;
|
||||
int deferred_work;
|
||||
bool is_dead;
|
||||
|
||||
struct list_head todo;
|
||||
struct binder_stats stats;
|
||||
struct list_head delivered_death;
|
||||
int max_threads;
|
||||
int requested_threads;
|
||||
int requested_threads_started;
|
||||
int tmp_ref;
|
||||
long default_priority;
|
||||
struct dentry *debugfs_entry;
|
||||
struct binder_alloc alloc;
|
||||
struct binder_context *context;
|
||||
spinlock_t inner_lock;
|
||||
spinlock_t outer_lock;
|
||||
struct dentry *binderfs_entry;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct binder_thread - binder thread bookkeeping
|
||||
* @proc: binder process for this thread
|
||||
* (invariant after initialization)
|
||||
* @rb_node: element for proc->threads rbtree
|
||||
* (protected by @proc->inner_lock)
|
||||
* @waiting_thread_node: element for @proc->waiting_threads list
|
||||
* (protected by @proc->inner_lock)
|
||||
* @pid: PID for this thread
|
||||
* (invariant after initialization)
|
||||
* @looper: bitmap of looping state
|
||||
* (only accessed by this thread)
|
||||
* @looper_need_return: looping thread needs to exit driver
|
||||
* (no lock needed)
|
||||
* @transaction_stack: stack of in-progress transactions for this thread
|
||||
* (protected by @proc->inner_lock)
|
||||
* @todo: list of work to do for this thread
|
||||
* (protected by @proc->inner_lock)
|
||||
* @process_todo: whether work in @todo should be processed
|
||||
* (protected by @proc->inner_lock)
|
||||
* @return_error: transaction errors reported by this thread
|
||||
* (only accessed by this thread)
|
||||
* @reply_error: transaction errors reported by target thread
|
||||
* (protected by @proc->inner_lock)
|
||||
* @wait: wait queue for thread work
|
||||
* @stats: per-thread statistics
|
||||
* (atomics, no lock needed)
|
||||
* @tmp_ref: temporary reference to indicate thread is in use
|
||||
* (atomic since @proc->inner_lock cannot
|
||||
* always be acquired)
|
||||
* @is_dead: thread is dead and awaiting free
|
||||
* when outstanding transactions are cleaned up
|
||||
* (protected by @proc->inner_lock)
|
||||
*
|
||||
* Bookkeeping structure for binder threads.
|
||||
*/
|
||||
struct binder_thread {
|
||||
struct binder_proc *proc;
|
||||
struct rb_node rb_node;
|
||||
struct list_head waiting_thread_node;
|
||||
int pid;
|
||||
int looper; /* only modified by this thread */
|
||||
bool looper_need_return; /* can be written by other thread */
|
||||
struct binder_transaction *transaction_stack;
|
||||
struct list_head todo;
|
||||
bool process_todo;
|
||||
struct binder_error return_error;
|
||||
struct binder_error reply_error;
|
||||
wait_queue_head_t wait;
|
||||
struct binder_stats stats;
|
||||
atomic_t tmp_ref;
|
||||
bool is_dead;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct binder_txn_fd_fixup - transaction fd fixup list element
|
||||
* @fixup_entry: list entry
|
||||
* @file: struct file to be associated with new fd
|
||||
* @offset: offset in buffer data to this fixup
|
||||
*
|
||||
* List element for fd fixups in a transaction. Since file
|
||||
* descriptors need to be allocated in the context of the
|
||||
* target process, we pass each fd to be processed in this
|
||||
* struct.
|
||||
*/
|
||||
struct binder_txn_fd_fixup {
|
||||
struct list_head fixup_entry;
|
||||
struct file *file;
|
||||
size_t offset;
|
||||
};
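Aside, not part of the diff: because a file descriptor is only meaningful in the file table of the process that will receive it, the sender queues one of these elements per fd and the receiving side installs them later. A rough, hypothetical sketch of that second step (the real driver also patches the new fd value into the buffer at @offset):

static int apply_fd_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;
	int ret = 0;

	/* runs in the context of the receiving process */
	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		int fd = get_unused_fd_flags(O_CLOEXEC);

		if (fd < 0) {
			ret = fd;
			break;
		}
		fd_install(fd, fixup->file);	/* consumes the file reference */
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
	return ret;
}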
|
||||
|
||||
struct binder_transaction {
|
||||
int debug_id;
|
||||
struct binder_work work;
|
||||
struct binder_thread *from;
|
||||
struct binder_transaction *from_parent;
|
||||
struct binder_proc *to_proc;
|
||||
struct binder_thread *to_thread;
|
||||
struct binder_transaction *to_parent;
|
||||
unsigned need_reply:1;
|
||||
/* unsigned is_dead:1; */ /* not used at the moment */
|
||||
|
||||
struct binder_buffer *buffer;
|
||||
unsigned int code;
|
||||
unsigned int flags;
|
||||
long priority;
|
||||
long saved_priority;
|
||||
kuid_t sender_euid;
|
||||
struct list_head fd_fixups;
|
||||
binder_uintptr_t security_ctx;
|
||||
/**
|
||||
* @lock: protects @from, @to_proc, and @to_thread
|
||||
*
|
||||
* @from, @to_proc, and @to_thread can be set to NULL
|
||||
* during thread teardown
|
||||
*/
|
||||
spinlock_t lock;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct binder_object - union of flat binder object types
|
||||
* @hdr: generic object header
|
||||
* @fbo: binder object (nodes and refs)
|
||||
* @fdo: file descriptor object
|
||||
* @bbo: binder buffer pointer
|
||||
* @fdao: file descriptor array
|
||||
*
|
||||
* Used for type-independent object copies
|
||||
*/
|
||||
struct binder_object {
|
||||
union {
|
||||
struct binder_object_header hdr;
|
||||
struct flat_binder_object fbo;
|
||||
struct binder_fd_object fdo;
|
||||
struct binder_buffer_object bbo;
|
||||
struct binder_fd_array_object fdao;
|
||||
};
|
||||
};
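Aside, not from the diff: the shared @hdr lets code copy an object out of a transaction buffer once and then dispatch on its type. A hedged sketch of such a dispatch (the helper is hypothetical; the BINDER_TYPE_* constants come from the binder UAPI):

static size_t binder_object_size(const struct binder_object *object)
{
	switch (object->hdr.type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		return sizeof(struct flat_binder_object);
	case BINDER_TYPE_FD:
		return sizeof(struct binder_fd_object);
	case BINDER_TYPE_PTR:
		return sizeof(struct binder_buffer_object);
	case BINDER_TYPE_FDA:
		return sizeof(struct binder_fd_array_object);
	default:
		return 0;	/* unknown/invalid object */
	}
}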
|
||||
|
||||
extern struct binder_transaction_log binder_transaction_log;
|
||||
extern struct binder_transaction_log binder_transaction_log_failed;
|
||||
#endif /* _LINUX_BINDER_INTERNAL_H */
|
||||
|
|
|
@ -95,6 +95,35 @@ TRACE_EVENT(binder_wait_for_work,
|
|||
__entry->thread_todo)
|
||||
);
|
||||
|
||||
TRACE_EVENT(binder_txn_latency_free,
|
||||
TP_PROTO(struct binder_transaction *t,
|
||||
int from_proc, int from_thread,
|
||||
int to_proc, int to_thread),
|
||||
TP_ARGS(t, from_proc, from_thread, to_proc, to_thread),
|
||||
TP_STRUCT__entry(
|
||||
__field(int, debug_id)
|
||||
__field(int, from_proc)
|
||||
__field(int, from_thread)
|
||||
__field(int, to_proc)
|
||||
__field(int, to_thread)
|
||||
__field(unsigned int, code)
|
||||
__field(unsigned int, flags)
|
||||
),
|
||||
TP_fast_assign(
|
||||
__entry->debug_id = t->debug_id;
|
||||
__entry->from_proc = from_proc;
|
||||
__entry->from_thread = from_thread;
|
||||
__entry->to_proc = to_proc;
|
||||
__entry->to_thread = to_thread;
|
||||
__entry->code = t->code;
|
||||
__entry->flags = t->flags;
|
||||
),
|
||||
TP_printk("transaction=%d from %d:%d to %d:%d flags=0x%x code=0x%x",
|
||||
__entry->debug_id, __entry->from_proc, __entry->from_thread,
|
||||
__entry->to_proc, __entry->to_thread, __entry->code,
|
||||
__entry->flags)
|
||||
);
|
||||
|
||||
TRACE_EVENT(binder_transaction,
|
||||
TP_PROTO(bool reply, struct binder_transaction *t,
|
||||
struct binder_node *target_node),
|
||||
|
|
|
@ -670,9 +670,7 @@ int dprc_setup(struct fsl_mc_device *mc_dev)
|
|||
goto error_cleanup_open;
|
||||
}
|
||||
|
||||
if (major_ver < DPRC_MIN_VER_MAJOR ||
|
||||
(major_ver == DPRC_MIN_VER_MAJOR &&
|
||||
minor_ver < DPRC_MIN_VER_MINOR)) {
|
||||
if (major_ver < DPRC_MIN_VER_MAJOR) {
|
||||
dev_err(&mc_dev->dev,
|
||||
"ERROR: DPRC version %d.%d not supported\n",
|
||||
major_ver, minor_ver);
|
||||
|
|
|
@ -576,6 +576,8 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io,
|
|||
rsp_params = (struct dprc_rsp_get_obj_region *)cmd.params;
|
||||
region_desc->base_offset = le64_to_cpu(rsp_params->base_offset);
|
||||
region_desc->size = le32_to_cpu(rsp_params->size);
|
||||
region_desc->type = rsp_params->type;
|
||||
region_desc->flags = le32_to_cpu(rsp_params->flags);
|
||||
if (dprc_major_ver > 6 || (dprc_major_ver == 6 && dprc_minor_ver >= 3))
|
||||
region_desc->base_address = le64_to_cpu(rsp_params->base_addr);
|
||||
else
|
||||
|
|
|
@ -292,8 +292,10 @@ int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
|
|||
goto error;
|
||||
|
||||
mc_adev = resource->data;
|
||||
if (!mc_adev)
|
||||
if (!mc_adev) {
|
||||
error = -EINVAL;
|
||||
goto error;
|
||||
}
|
||||
|
||||
mc_adev->consumer_link = device_link_add(&mc_dev->dev,
|
||||
&mc_adev->dev,
|
||||
|
|
|
@ -60,6 +60,9 @@ struct fsl_mc_addr_translation_range {
|
|||
phys_addr_t start_phys_addr;
|
||||
};
|
||||
|
||||
#define FSL_MC_GCR1 0x0
|
||||
#define GCR1_P1_STOP BIT(31)
|
||||
|
||||
#define FSL_MC_FAPR 0x28
|
||||
#define MC_FAPR_PL BIT(18)
|
||||
#define MC_FAPR_BMT BIT(17)
|
||||
|
@ -967,24 +970,42 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
|
|||
platform_set_drvdata(pdev, mc);
|
||||
|
||||
plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
|
||||
if (plat_res)
|
||||
if (plat_res) {
|
||||
mc->fsl_mc_regs = devm_ioremap_resource(&pdev->dev, plat_res);
|
||||
if (IS_ERR(mc->fsl_mc_regs))
|
||||
return PTR_ERR(mc->fsl_mc_regs);
|
||||
}
|
||||
|
||||
if (mc->fsl_mc_regs && IS_ENABLED(CONFIG_ACPI) &&
|
||||
!dev_of_node(&pdev->dev)) {
|
||||
mc_stream_id = readl(mc->fsl_mc_regs + FSL_MC_FAPR);
|
||||
if (mc->fsl_mc_regs) {
|
||||
/*
|
||||
* HW ORs the PL and BMT bit, places the result in bit 15 of
|
||||
* the StreamID and ORs in the ICID. Calculate it accordingly.
|
||||
* Some bootloaders pause the MC firmware before booting the
|
||||
* kernel so that MC will not cause faults as soon as the
|
||||
* SMMU probes due to the fact that there's no configuration
|
||||
* in place for MC.
|
||||
* At this point MC should have all its SMMU setup done so make
|
||||
* sure it is resumed.
|
||||
*/
|
||||
mc_stream_id = (mc_stream_id & 0xffff) |
|
||||
writel(readl(mc->fsl_mc_regs + FSL_MC_GCR1) & (~GCR1_P1_STOP),
|
||||
mc->fsl_mc_regs + FSL_MC_GCR1);
|
||||
|
||||
if (IS_ENABLED(CONFIG_ACPI) && !dev_of_node(&pdev->dev)) {
|
||||
mc_stream_id = readl(mc->fsl_mc_regs + FSL_MC_FAPR);
|
||||
/*
|
||||
* HW ORs the PL and BMT bit, places the result in bit
|
||||
* 14 of the StreamID and ORs in the ICID. Calculate it
|
||||
* accordingly.
|
||||
*/
|
||||
mc_stream_id = (mc_stream_id & 0xffff) |
|
||||
((mc_stream_id & (MC_FAPR_PL | MC_FAPR_BMT)) ?
|
||||
0x4000 : 0);
|
||||
error = acpi_dma_configure_id(&pdev->dev, DEV_DMA_COHERENT,
|
||||
&mc_stream_id);
|
||||
if (error)
|
||||
dev_warn(&pdev->dev, "failed to configure dma: %d.\n",
|
||||
error);
|
||||
BIT(14) : 0);
|
||||
error = acpi_dma_configure_id(&pdev->dev,
|
||||
DEV_DMA_COHERENT,
|
||||
&mc_stream_id);
|
||||
if (error)
|
||||
dev_warn(&pdev->dev,
|
||||
"failed to configure dma: %d.\n",
|
||||
error);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -211,12 +211,13 @@ struct dprc_cmd_get_obj_region {
|
|||
|
||||
struct dprc_rsp_get_obj_region {
|
||||
/* response word 0 */
|
||||
__le64 pad;
|
||||
__le64 pad0;
|
||||
/* response word 1 */
|
||||
__le64 base_offset;
|
||||
/* response word 2 */
|
||||
__le32 size;
|
||||
__le32 pad2;
|
||||
u8 type;
|
||||
u8 pad2[3];
|
||||
/* response word 3 */
|
||||
__le32 flags;
|
||||
__le32 pad3;
|
||||
|
|
|
@ -20,3 +20,12 @@ config MHI_BUS_DEBUG
|
|||
Enable debugfs support for use with the MHI transport. Allows
|
||||
reading and/or modifying some values within the MHI controller
|
||||
for debug and test purposes.
|
||||
|
||||
config MHI_BUS_PCI_GENERIC
|
||||
tristate "MHI PCI controller driver"
|
||||
depends on MHI_BUS
|
||||
depends on PCI
|
||||
help
|
||||
This driver provides MHI PCI controller driver for devices such as
|
||||
Qualcomm SDX55 based PCIe modems.
|
||||
|
||||
|
|
|
@ -1,2 +1,6 @@
|
|||
# core layer
|
||||
obj-y += core/
|
||||
|
||||
obj-$(CONFIG_MHI_BUS_PCI_GENERIC) += mhi_pci_generic.o
|
||||
mhi_pci_generic-y += pci_generic.o
|
||||
|
||||
|
|
|
@ -92,6 +92,9 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
|
|||
* image download completion.
|
||||
*/
|
||||
ee = mhi_get_exec_env(mhi_cntrl);
|
||||
if (ee == MHI_EE_MAX)
|
||||
goto error_exit_rddm;
|
||||
|
||||
if (ee != MHI_EE_RDDM) {
|
||||
dev_dbg(dev, "Trigger device into RDDM mode using SYS ERR\n");
|
||||
mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
|
||||
|
@ -139,15 +142,17 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
|
|||
ee = mhi_get_exec_env(mhi_cntrl);
|
||||
ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status);
|
||||
|
||||
dev_err(dev, "Did not complete RDDM transfer\n");
|
||||
dev_err(dev, "Current EE: %s\n", TO_MHI_EXEC_STR(ee));
|
||||
dev_err(dev, "RXVEC_STATUS: 0x%x\n", rx_status);
|
||||
|
||||
error_exit_rddm:
|
||||
dev_err(dev, "RDDM transfer failed. Current EE: %s\n",
|
||||
TO_MHI_EXEC_STR(ee));
|
||||
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
/* Download RDDM image from device */
|
||||
int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic)
|
||||
int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic)
|
||||
{
|
||||
void __iomem *base = mhi_cntrl->bhie;
|
||||
struct device *dev = &mhi_cntrl->mhi_dev->dev;
|
||||
|
@ -169,9 +174,9 @@ int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic)
|
|||
|
||||
return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mhi_download_rddm_img);
|
||||
EXPORT_SYMBOL_GPL(mhi_download_rddm_image);
|
||||
|
||||
static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
|
||||
static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl,
|
||||
const struct mhi_buf *mhi_buf)
|
||||
{
|
||||
void __iomem *base = mhi_cntrl->bhie;
|
||||
|
@ -187,7 +192,7 @@ static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
|
|||
}
|
||||
|
||||
sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_TXVECSTATUS_SEQNUM_BMSK);
|
||||
dev_dbg(dev, "Starting AMSS download via BHIe. Sequence ID:%u\n",
|
||||
dev_dbg(dev, "Starting image download via BHIe. Sequence ID: %u\n",
|
||||
sequence_id);
|
||||
mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS,
|
||||
upper_32_bits(mhi_buf->dma_addr));
|
||||
|
@ -218,7 +223,7 @@ static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
|
|||
return (!ret) ? -ETIMEDOUT : 0;
|
||||
}
|
||||
|
||||
static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
|
||||
static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl,
|
||||
dma_addr_t dma_addr,
|
||||
size_t size)
|
||||
{
|
||||
|
@ -245,7 +250,7 @@ static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
|
|||
}
|
||||
|
||||
session_id = MHI_RANDOM_U32_NONZERO(BHI_TXDB_SEQNUM_BMSK);
|
||||
dev_dbg(dev, "Starting SBL download via BHI. Session ID:%u\n",
|
||||
dev_dbg(dev, "Starting image download via BHI. Session ID: %u\n",
|
||||
session_id);
|
||||
mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
|
||||
mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
|
||||
|
@ -365,7 +370,6 @@ static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
|
|||
size_t remainder = firmware->size;
|
||||
size_t to_cpy;
|
||||
const u8 *buf = firmware->data;
|
||||
int i = 0;
|
||||
struct mhi_buf *mhi_buf = img_info->mhi_buf;
|
||||
struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
|
||||
|
||||
|
@ -377,7 +381,6 @@ static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
|
|||
|
||||
buf += to_cpy;
|
||||
remainder -= to_cpy;
|
||||
i++;
|
||||
bhi_vec++;
|
||||
mhi_buf++;
|
||||
}
|
||||
|
@ -425,13 +428,13 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
|
|||
!mhi_cntrl->seg_len))) {
|
||||
dev_err(dev,
|
||||
"No firmware image defined or !sbl_size || !seg_len\n");
|
||||
return;
|
||||
goto error_fw_load;
|
||||
}
|
||||
|
||||
ret = request_firmware(&firmware, fw_name, dev);
|
||||
if (ret) {
|
||||
dev_err(dev, "Error loading firmware: %d\n", ret);
|
||||
return;
|
||||
goto error_fw_load;
|
||||
}
|
||||
|
||||
size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size;
|
||||
|
@ -443,25 +446,25 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
|
|||
buf = mhi_alloc_coherent(mhi_cntrl, size, &dma_addr, GFP_KERNEL);
|
||||
if (!buf) {
|
||||
release_firmware(firmware);
|
||||
return;
|
||||
goto error_fw_load;
|
||||
}
|
||||
|
||||
/* Download SBL image */
|
||||
/* Download image using BHI */
|
||||
memcpy(buf, firmware->data, size);
|
||||
ret = mhi_fw_load_sbl(mhi_cntrl, dma_addr, size);
|
||||
ret = mhi_fw_load_bhi(mhi_cntrl, dma_addr, size);
|
||||
mhi_free_coherent(mhi_cntrl, size, buf, dma_addr);
|
||||
|
||||
if (!mhi_cntrl->fbc_download || ret || mhi_cntrl->ee == MHI_EE_EDL)
|
||||
release_firmware(firmware);
|
||||
|
||||
/* Error or in EDL mode, we're done */
|
||||
if (ret) {
|
||||
dev_err(dev, "MHI did not load SBL, ret:%d\n", ret);
|
||||
return;
|
||||
dev_err(dev, "MHI did not load image over BHI, ret: %d\n", ret);
|
||||
release_firmware(firmware);
|
||||
goto error_fw_load;
|
||||
}
|
||||
|
||||
if (mhi_cntrl->ee == MHI_EE_EDL)
|
||||
if (mhi_cntrl->ee == MHI_EE_EDL) {
|
||||
release_firmware(firmware);
|
||||
return;
|
||||
}
|
||||
|
||||
write_lock_irq(&mhi_cntrl->pm_lock);
|
||||
mhi_cntrl->dev_state = MHI_STATE_RESET;
|
||||
|
@ -474,13 +477,17 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
|
|||
if (mhi_cntrl->fbc_download) {
|
||||
ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image,
|
||||
firmware->size);
|
||||
if (ret)
|
||||
goto error_alloc_fw_table;
|
||||
if (ret) {
|
||||
release_firmware(firmware);
|
||||
goto error_fw_load;
|
||||
}
|
||||
|
||||
/* Load the firmware into BHIE vec table */
|
||||
mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image);
|
||||
}
|
||||
|
||||
release_firmware(firmware);
|
||||
|
||||
fw_load_ee_pthru:
|
||||
/* Transitioning into MHI RESET->READY state */
|
||||
ret = mhi_ready_state_transition(mhi_cntrl);
|
||||
|
@ -490,7 +497,7 @@ fw_load_ee_pthru:
|
|||
|
||||
if (ret) {
|
||||
dev_err(dev, "MHI did not enter READY state\n");
|
||||
goto error_read;
|
||||
goto error_ready_state;
|
||||
}
|
||||
|
||||
/* Wait for the SBL event */
|
||||
|
@ -501,25 +508,27 @@ fw_load_ee_pthru:
|
|||
|
||||
if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
|
||||
dev_err(dev, "MHI did not enter SBL\n");
|
||||
goto error_read;
|
||||
goto error_ready_state;
|
||||
}
|
||||
|
||||
/* Start full firmware image download */
|
||||
image_info = mhi_cntrl->fbc_image;
|
||||
ret = mhi_fw_load_amss(mhi_cntrl,
|
||||
ret = mhi_fw_load_bhie(mhi_cntrl,
|
||||
/* Vector table is the last entry */
|
||||
&image_info->mhi_buf[image_info->entries - 1]);
|
||||
if (ret)
|
||||
dev_err(dev, "MHI did not load AMSS, ret:%d\n", ret);
|
||||
|
||||
release_firmware(firmware);
|
||||
if (ret) {
|
||||
dev_err(dev, "MHI did not load image over BHIe, ret: %d\n",
|
||||
ret);
|
||||
goto error_fw_load;
|
||||
}
|
||||
|
||||
return;
|
||||
|
||||
error_read:
|
||||
error_ready_state:
|
||||
mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
|
||||
mhi_cntrl->fbc_image = NULL;
|
||||
|
||||
error_alloc_fw_table:
|
||||
release_firmware(firmware);
|
||||
error_fw_load:
|
||||
mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
|
||||
wake_up_all(&mhi_cntrl->state_event);
|
||||
}
|
||||
|
|
|
@ -159,7 +159,9 @@ static int mhi_debugfs_devices_show(struct seq_file *m, void *d)
|
|||
return -ENODEV;
|
||||
}
|
||||
|
||||
device_for_each_child(mhi_cntrl->cntrl_dev, m, mhi_device_info_show);
|
||||
/* Show controller and client(s) info */
|
||||
mhi_device_info_show(&mhi_cntrl->mhi_dev->dev, m);
|
||||
device_for_each_child(&mhi_cntrl->mhi_dev->dev, m, mhi_device_info_show);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -8,6 +8,7 @@
|
|||
#include <linux/device.h>
|
||||
#include <linux/dma-direction.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/idr.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/mhi.h>
|
||||
|
@ -18,6 +19,8 @@
|
|||
#include <linux/wait.h>
|
||||
#include "internal.h"
|
||||
|
||||
static DEFINE_IDA(mhi_controller_ida);
|
||||
|
||||
const char * const mhi_ee_str[MHI_EE_MAX] = {
|
||||
[MHI_EE_PBL] = "PBL",
|
||||
[MHI_EE_SBL] = "SBL",
|
||||
|
@ -610,7 +613,7 @@ static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
|
|||
{
|
||||
struct mhi_event *mhi_event;
|
||||
const struct mhi_event_config *event_cfg;
|
||||
struct device *dev = &mhi_cntrl->mhi_dev->dev;
|
||||
struct device *dev = mhi_cntrl->cntrl_dev;
|
||||
int i, num;
|
||||
|
||||
num = config->num_events;
|
||||
|
@ -692,7 +695,7 @@ static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
|
|||
const struct mhi_controller_config *config)
|
||||
{
|
||||
const struct mhi_channel_config *ch_cfg;
|
||||
struct device *dev = &mhi_cntrl->mhi_dev->dev;
|
||||
struct device *dev = mhi_cntrl->cntrl_dev;
|
||||
int i;
|
||||
u32 chan;
|
||||
|
||||
|
@ -857,7 +860,7 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
|
|||
|
||||
if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
|
||||
!mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
|
||||
!mhi_cntrl->write_reg)
|
||||
!mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs)
|
||||
return -EINVAL;
|
||||
|
||||
ret = parse_config(mhi_cntrl, config);
|
||||
|
@ -868,7 +871,7 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
|
|||
sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
|
||||
if (!mhi_cntrl->mhi_cmd) {
|
||||
ret = -ENOMEM;
|
||||
goto error_alloc_cmd;
|
||||
goto err_free_event;
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&mhi_cntrl->transition_list);
|
||||
|
@ -879,6 +882,14 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
|
|||
INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
|
||||
init_waitqueue_head(&mhi_cntrl->state_event);
|
||||
|
||||
mhi_cntrl->hiprio_wq = alloc_ordered_workqueue
|
||||
("mhi_hiprio_wq", WQ_MEM_RECLAIM | WQ_HIGHPRI);
|
||||
if (!mhi_cntrl->hiprio_wq) {
|
||||
dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n");
|
||||
ret = -ENOMEM;
|
||||
goto err_free_cmd;
|
||||
}
|
||||
|
||||
mhi_cmd = mhi_cntrl->mhi_cmd;
|
||||
for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
|
||||
spin_lock_init(&mhi_cmd->lock);
|
||||
|
@ -922,7 +933,7 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
|
|||
ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
|
||||
SOC_HW_VERSION_OFFS, &soc_info);
|
||||
if (ret)
|
||||
goto error_alloc_dev;
|
||||
goto err_destroy_wq;
|
||||
|
||||
mhi_cntrl->family_number = (soc_info & SOC_HW_VERSION_FAM_NUM_BMSK) >>
|
||||
SOC_HW_VERSION_FAM_NUM_SHFT;
|
||||
|
@ -933,25 +944,31 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
|
|||
mhi_cntrl->minor_version = (soc_info & SOC_HW_VERSION_MINOR_VER_BMSK) >>
|
||||
SOC_HW_VERSION_MINOR_VER_SHFT;
|
||||
|
||||
mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL);
|
||||
if (mhi_cntrl->index < 0) {
|
||||
ret = mhi_cntrl->index;
|
||||
goto err_destroy_wq;
|
||||
}
|
||||
|
||||
/* Register controller with MHI bus */
|
||||
mhi_dev = mhi_alloc_device(mhi_cntrl);
|
||||
if (IS_ERR(mhi_dev)) {
|
||||
dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
|
||||
ret = PTR_ERR(mhi_dev);
|
||||
goto error_alloc_dev;
|
||||
goto err_ida_free;
|
||||
}
|
||||
|
||||
mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
|
||||
mhi_dev->mhi_cntrl = mhi_cntrl;
|
||||
dev_set_name(&mhi_dev->dev, "%s", dev_name(mhi_cntrl->cntrl_dev));
|
||||
mhi_dev->name = dev_name(mhi_cntrl->cntrl_dev);
|
||||
dev_set_name(&mhi_dev->dev, "mhi%d", mhi_cntrl->index);
|
||||
mhi_dev->name = dev_name(&mhi_dev->dev);
|
||||
|
||||
/* Init wakeup source */
|
||||
device_init_wakeup(&mhi_dev->dev, true);
|
||||
|
||||
ret = device_add(&mhi_dev->dev);
|
||||
if (ret)
|
||||
goto error_add_dev;
|
||||
goto err_release_dev;
|
||||
|
||||
mhi_cntrl->mhi_dev = mhi_dev;
|
||||
|
||||
|
@ -959,15 +976,17 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
|
|||
|
||||
return 0;
|
||||
|
||||
error_add_dev:
|
||||
err_release_dev:
|
||||
put_device(&mhi_dev->dev);
|
||||
|
||||
error_alloc_dev:
|
||||
err_ida_free:
|
||||
ida_free(&mhi_controller_ida, mhi_cntrl->index);
|
||||
err_destroy_wq:
|
||||
destroy_workqueue(mhi_cntrl->hiprio_wq);
|
||||
err_free_cmd:
|
||||
kfree(mhi_cntrl->mhi_cmd);
|
||||
|
||||
error_alloc_cmd:
|
||||
vfree(mhi_cntrl->mhi_chan);
|
||||
err_free_event:
|
||||
kfree(mhi_cntrl->mhi_event);
|
||||
vfree(mhi_cntrl->mhi_chan);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -981,6 +1000,7 @@ void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
|
|||
|
||||
mhi_destroy_debugfs(mhi_cntrl);
|
||||
|
||||
destroy_workqueue(mhi_cntrl->hiprio_wq);
|
||||
kfree(mhi_cntrl->mhi_cmd);
|
||||
kfree(mhi_cntrl->mhi_event);
|
||||
|
||||
|
@ -995,6 +1015,8 @@ void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
|
|||
|
||||
device_del(&mhi_dev->dev);
|
||||
put_device(&mhi_dev->dev);
|
||||
|
||||
ida_free(&mhi_controller_ida, mhi_cntrl->index);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mhi_unregister_controller);
|
||||
|
||||
|
@ -1121,7 +1143,15 @@ struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
|
|||
device_initialize(dev);
|
||||
dev->bus = &mhi_bus_type;
|
||||
dev->release = mhi_release_device;
|
||||
dev->parent = mhi_cntrl->cntrl_dev;
|
||||
|
||||
if (mhi_cntrl->mhi_dev) {
|
||||
/* for MHI client devices, parent is the MHI controller device */
|
||||
dev->parent = &mhi_cntrl->mhi_dev->dev;
|
||||
} else {
|
||||
/* for MHI controller device, parent is the bus device (e.g. pci device) */
|
||||
dev->parent = mhi_cntrl->cntrl_dev;
|
||||
}
|
||||
|
||||
mhi_dev->mhi_cntrl = mhi_cntrl;
|
||||
mhi_dev->dev_wake = 0;
|
||||
|
||||
|
@ -1267,10 +1297,8 @@ static int mhi_driver_remove(struct device *dev)
|
|||
mutex_unlock(&mhi_chan->mutex);
|
||||
}
|
||||
|
||||
read_lock_bh(&mhi_cntrl->pm_lock);
|
||||
while (mhi_dev->dev_wake)
|
||||
mhi_device_put(mhi_dev);
|
||||
read_unlock_bh(&mhi_cntrl->pm_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -153,8 +153,8 @@ extern struct bus_type mhi_bus_type;
|
|||
#define BHI_SERIALNU (0x40)
|
||||
#define BHI_SBLANTIROLLVER (0x44)
|
||||
#define BHI_NUMSEG (0x48)
|
||||
#define BHI_MSMHWID(n) (0x4C + (0x4 * n))
|
||||
#define BHI_OEMPKHASH(n) (0x64 + (0x4 * n))
|
||||
#define BHI_MSMHWID(n) (0x4C + (0x4 * (n)))
|
||||
#define BHI_OEMPKHASH(n) (0x64 + (0x4 * (n)))
|
||||
#define BHI_RSVD5 (0xC4)
|
||||
#define BHI_STATUS_MASK (0xC0000000)
|
||||
#define BHI_STATUS_SHIFT (30)
|
||||
|
@ -608,12 +608,10 @@ enum mhi_pm_state __must_check mhi_tryset_pm_state(
|
|||
struct mhi_controller *mhi_cntrl,
|
||||
enum mhi_pm_state state);
|
||||
const char *to_mhi_pm_state_str(enum mhi_pm_state state);
|
||||
enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl);
|
||||
int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
|
||||
enum dev_st_transition state);
|
||||
void mhi_pm_st_worker(struct work_struct *work);
|
||||
void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl);
|
||||
void mhi_fw_load_worker(struct work_struct *work);
|
||||
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl);
|
||||
int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl);
|
||||
void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl);
|
||||
|
|
|
@ -123,6 +123,7 @@ enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
|
|||
|
||||
return (ret) ? MHI_EE_MAX : exec;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mhi_get_exec_env);
|
||||
|
||||
enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
|
||||
{
|
||||
|
@ -132,6 +133,7 @@ enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
|
|||
MHISTATUS_MHISTATE_SHIFT, &state);
|
||||
return ret ? MHI_STATE_MAX : state;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mhi_get_mhi_state);
|
||||
|
||||
int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
|
||||
struct mhi_buf_info *buf_info)
|
||||
|
@ -329,7 +331,7 @@ void mhi_create_devices(struct mhi_controller *mhi_cntrl)
|
|||
/* Channel name is same for both UL and DL */
|
||||
mhi_dev->name = mhi_chan->name;
|
||||
dev_set_name(&mhi_dev->dev, "%s_%s",
|
||||
dev_name(mhi_cntrl->cntrl_dev),
|
||||
dev_name(&mhi_cntrl->mhi_dev->dev),
|
||||
mhi_dev->name);
|
||||
|
||||
/* Init wakeup source if available */
|
||||
|
@ -399,6 +401,10 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
|
|||
|
||||
/* If device supports RDDM don't bother processing SYS error */
|
||||
if (mhi_cntrl->rddm_image) {
|
||||
/* host may be performing a device power down already */
|
||||
if (!mhi_is_active(mhi_cntrl))
|
||||
goto exit_intvec;
|
||||
|
||||
if (mhi_cntrl->ee == MHI_EE_RDDM && mhi_cntrl->ee != ee) {
|
||||
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
|
||||
wake_up_all(&mhi_cntrl->state_event);
|
||||
|
@ -735,11 +741,6 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
|
|||
{
|
||||
enum mhi_pm_state new_state;
|
||||
|
||||
/* skip SYS_ERROR handling if RDDM supported */
|
||||
if (mhi_cntrl->ee == MHI_EE_RDDM ||
|
||||
mhi_cntrl->rddm_image)
|
||||
break;
|
||||
|
||||
dev_dbg(dev, "System error detected\n");
|
||||
write_lock_irq(&mhi_cntrl->pm_lock);
|
||||
new_state = mhi_tryset_pm_state(mhi_cntrl,
|
||||
|
@ -1235,7 +1236,8 @@ static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
|
|||
/* no more processing events for this channel */
|
||||
mutex_lock(&mhi_chan->mutex);
|
||||
write_lock_irq(&mhi_chan->lock);
|
||||
if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) {
|
||||
if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED &&
|
||||
mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) {
|
||||
write_unlock_irq(&mhi_chan->lock);
|
||||
mutex_unlock(&mhi_chan->mutex);
|
||||
return;
|
||||
|
|
|
@@ -37,9 +37,10 @@
 * M0 -> FW_DL_ERR
 * M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
 * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
 * L2: SHUTDOWN_PROCESS -> DISABLE
 * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
 *     SHUTDOWN_PROCESS -> DISABLE
 * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
 *     LD_ERR_FATAL_DETECT -> SHUTDOWN_PROCESS
 *     LD_ERR_FATAL_DETECT -> DISABLE
 */
static struct mhi_pm_transitions const dev_state_transitions[] = {
	/* L0 States */
|
@ -72,7 +73,7 @@ static struct mhi_pm_transitions const dev_state_transitions[] = {
|
|||
{
|
||||
MHI_PM_M3,
|
||||
MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
|
||||
MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
|
||||
MHI_PM_LD_ERR_FATAL_DETECT
|
||||
},
|
||||
{
|
||||
MHI_PM_M3_EXIT,
|
||||
|
@ -103,7 +104,7 @@ static struct mhi_pm_transitions const dev_state_transitions[] = {
|
|||
/* L3 States */
|
||||
{
|
||||
MHI_PM_LD_ERR_FATAL_DETECT,
|
||||
MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_PROCESS
|
||||
MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_DISABLE
|
||||
},
|
||||
};
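Aside, not part of the diff: a table like dev_state_transitions is consulted by checking whether the requested state appears in the bitmask of allowed next states for the current state, which is what mhi_tryset_pm_state() does. A hedged sketch of that lookup, assuming mhi_pm_transitions carries from_state and to_states members (names are an assumption):

static enum mhi_pm_state try_transition(enum mhi_pm_state cur,
					enum mhi_pm_state next)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dev_state_transitions); i++) {
		if (dev_state_transitions[i].from_state != cur)
			continue;
		/* allowed only if the target is in the mask of valid
		 * next states for the current state */
		if (dev_state_transitions[i].to_states & next)
			return next;
		break;
	}
	return cur;	/* transition rejected, stay where we are */
}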
|
||||
|
||||
|
@ -383,10 +384,14 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
|
|||
write_lock_irq(&mhi_cntrl->pm_lock);
|
||||
if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
|
||||
mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
|
||||
write_unlock_irq(&mhi_cntrl->pm_lock);
|
||||
|
||||
if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee))
|
||||
if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
|
||||
mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
|
||||
write_unlock_irq(&mhi_cntrl->pm_lock);
|
||||
wake_up_all(&mhi_cntrl->state_event);
|
||||
return -EIO;
|
||||
}
|
||||
write_unlock_irq(&mhi_cntrl->pm_lock);
|
||||
|
||||
wake_up_all(&mhi_cntrl->state_event);
|
||||
|
||||
|
@ -440,11 +445,10 @@ error_mission_mode:
|
|||
return ret;
|
||||
}
|
||||
|
||||
/* Handle SYS_ERR and Shutdown transitions */
|
||||
static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
|
||||
enum mhi_pm_state transition_state)
|
||||
/* Handle shutdown transitions */
|
||||
static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
|
||||
{
|
||||
enum mhi_pm_state cur_state, prev_state;
|
||||
enum mhi_pm_state cur_state;
|
||||
struct mhi_event *mhi_event;
|
||||
struct mhi_cmd_ctxt *cmd_ctxt;
|
||||
struct mhi_cmd *mhi_cmd;
|
||||
|
@ -452,37 +456,13 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
|
|||
struct device *dev = &mhi_cntrl->mhi_dev->dev;
|
||||
int ret, i;
|
||||
|
||||
dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
|
||||
to_mhi_pm_state_str(mhi_cntrl->pm_state),
|
||||
to_mhi_pm_state_str(transition_state));
|
||||
|
||||
/* We must notify MHI control driver so it can clean up first */
|
||||
if (transition_state == MHI_PM_SYS_ERR_PROCESS)
|
||||
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);
|
||||
dev_dbg(dev, "Processing disable transition with PM state: %s\n",
|
||||
to_mhi_pm_state_str(mhi_cntrl->pm_state));
|
||||
|
||||
mutex_lock(&mhi_cntrl->pm_mutex);
|
||||
write_lock_irq(&mhi_cntrl->pm_lock);
|
||||
prev_state = mhi_cntrl->pm_state;
|
||||
cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
|
||||
if (cur_state == transition_state) {
|
||||
mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
|
||||
mhi_cntrl->dev_state = MHI_STATE_RESET;
|
||||
}
|
||||
write_unlock_irq(&mhi_cntrl->pm_lock);
|
||||
|
||||
/* Wake up threads waiting for state transition */
|
||||
wake_up_all(&mhi_cntrl->state_event);
|
||||
|
||||
if (cur_state != transition_state) {
|
||||
dev_err(dev, "Failed to transition to state: %s from: %s\n",
|
||||
to_mhi_pm_state_str(transition_state),
|
||||
to_mhi_pm_state_str(cur_state));
|
||||
mutex_unlock(&mhi_cntrl->pm_mutex);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Trigger MHI RESET so that the device will not access host memory */
|
||||
if (MHI_REG_ACCESS_VALID(prev_state)) {
|
||||
if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
|
||||
u32 in_reset = -1;
|
||||
unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);
|
||||
|
||||
|
@ -498,11 +478,8 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
|
|||
MHICTRL_RESET_SHIFT,
|
||||
&in_reset) ||
|
||||
!in_reset, timeout);
|
||||
if ((!ret || in_reset) && cur_state == MHI_PM_SYS_ERR_PROCESS) {
|
||||
if (!ret || in_reset)
|
||||
dev_err(dev, "Device failed to exit MHI Reset state\n");
|
||||
mutex_unlock(&mhi_cntrl->pm_mutex);
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Device will clear BHI_INTVEC as a part of RESET processing,
|
||||
|
@ -517,6 +494,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
|
|||
for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
|
||||
if (mhi_event->offload_ev)
|
||||
continue;
|
||||
free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
|
||||
tasklet_kill(&mhi_event->task);
|
||||
}
|
||||
|
||||
|
@ -526,7 +504,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
|
|||
wake_up_all(&mhi_cntrl->state_event);
|
||||
|
||||
dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
|
||||
device_for_each_child(mhi_cntrl->cntrl_dev, NULL, mhi_destroy_device);
|
||||
device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);
|
||||
|
||||
mutex_lock(&mhi_cntrl->pm_mutex);
|
||||
|
||||
|
@ -562,19 +540,142 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
|
|||
er_ctxt->wp = er_ctxt->rbase;
|
||||
}
|
||||
|
||||
if (cur_state == MHI_PM_SYS_ERR_PROCESS) {
|
||||
mhi_ready_state_transition(mhi_cntrl);
|
||||
} else {
|
||||
/* Move to disable state */
|
||||
write_lock_irq(&mhi_cntrl->pm_lock);
|
||||
cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
|
||||
write_unlock_irq(&mhi_cntrl->pm_lock);
|
||||
if (unlikely(cur_state != MHI_PM_DISABLE))
|
||||
dev_err(dev, "Error moving from PM state: %s to: %s\n",
|
||||
to_mhi_pm_state_str(cur_state),
|
||||
to_mhi_pm_state_str(MHI_PM_DISABLE));
|
||||
/* Move to disable state */
|
||||
write_lock_irq(&mhi_cntrl->pm_lock);
|
||||
cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
|
||||
write_unlock_irq(&mhi_cntrl->pm_lock);
|
||||
if (unlikely(cur_state != MHI_PM_DISABLE))
|
||||
dev_err(dev, "Error moving from PM state: %s to: %s\n",
|
||||
to_mhi_pm_state_str(cur_state),
|
||||
to_mhi_pm_state_str(MHI_PM_DISABLE));
|
||||
|
||||
dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
|
||||
to_mhi_pm_state_str(mhi_cntrl->pm_state),
|
||||
TO_MHI_STATE_STR(mhi_cntrl->dev_state));
|
||||
|
||||
mutex_unlock(&mhi_cntrl->pm_mutex);
|
||||
}
|
||||
|
||||
/* Handle system error transitions */
|
||||
static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
|
||||
{
|
||||
enum mhi_pm_state cur_state, prev_state;
|
||||
struct mhi_event *mhi_event;
|
||||
struct mhi_cmd_ctxt *cmd_ctxt;
|
||||
struct mhi_cmd *mhi_cmd;
|
||||
struct mhi_event_ctxt *er_ctxt;
|
||||
struct device *dev = &mhi_cntrl->mhi_dev->dev;
|
||||
int ret, i;
|
||||
|
||||
dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
|
||||
to_mhi_pm_state_str(mhi_cntrl->pm_state),
|
||||
to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));
|
||||
|
||||
/* We must notify MHI control driver so it can clean up first */
|
||||
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);
|
||||
|
||||
mutex_lock(&mhi_cntrl->pm_mutex);
|
||||
write_lock_irq(&mhi_cntrl->pm_lock);
|
||||
prev_state = mhi_cntrl->pm_state;
|
||||
cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
|
||||
write_unlock_irq(&mhi_cntrl->pm_lock);
|
||||
|
||||
if (cur_state != MHI_PM_SYS_ERR_PROCESS) {
|
||||
dev_err(dev, "Failed to transition from PM state: %s to: %s\n",
|
||||
to_mhi_pm_state_str(cur_state),
|
||||
to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));
|
||||
goto exit_sys_error_transition;
|
||||
}
|
||||
|
||||
mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
|
||||
mhi_cntrl->dev_state = MHI_STATE_RESET;
|
||||
|
||||
/* Wake up threads waiting for state transition */
|
||||
wake_up_all(&mhi_cntrl->state_event);
|
||||
|
||||
/* Trigger MHI RESET so that the device will not access host memory */
|
||||
if (MHI_REG_ACCESS_VALID(prev_state)) {
|
||||
u32 in_reset = -1;
|
||||
unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);
|
||||
|
||||
dev_dbg(dev, "Triggering MHI Reset in device\n");
|
||||
mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
|
||||
|
||||
/* Wait for the reset bit to be cleared by the device */
|
||||
ret = wait_event_timeout(mhi_cntrl->state_event,
|
||||
mhi_read_reg_field(mhi_cntrl,
|
||||
mhi_cntrl->regs,
|
||||
MHICTRL,
|
||||
MHICTRL_RESET_MASK,
|
||||
MHICTRL_RESET_SHIFT,
|
||||
&in_reset) ||
|
||||
!in_reset, timeout);
|
||||
if (!ret || in_reset) {
|
||||
dev_err(dev, "Device failed to exit MHI Reset state\n");
|
||||
goto exit_sys_error_transition;
|
||||
}
|
||||
|
||||
/*
|
||||
* Device will clear BHI_INTVEC as a part of RESET processing,
|
||||
* hence re-program it
|
||||
*/
|
||||
mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
|
||||
}
|
||||
|
||||
dev_dbg(dev,
|
||||
"Waiting for all pending event ring processing to complete\n");
|
||||
mhi_event = mhi_cntrl->mhi_event;
|
||||
for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
|
||||
if (mhi_event->offload_ev)
|
||||
continue;
|
||||
tasklet_kill(&mhi_event->task);
|
||||
}
|
||||
|
||||
/* Release lock and wait for all pending threads to complete */
|
||||
mutex_unlock(&mhi_cntrl->pm_mutex);
|
||||
dev_dbg(dev, "Waiting for all pending threads to complete\n");
|
||||
wake_up_all(&mhi_cntrl->state_event);
|
||||
|
||||
dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
|
||||
device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);
|
||||
|
||||
mutex_lock(&mhi_cntrl->pm_mutex);
|
||||
|
||||
WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
|
||||
WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));
|
||||
|
||||
/* Reset the ev rings and cmd rings */
|
||||
dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
|
||||
mhi_cmd = mhi_cntrl->mhi_cmd;
|
||||
cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
|
||||
for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
|
||||
struct mhi_ring *ring = &mhi_cmd->ring;
|
||||
|
||||
ring->rp = ring->base;
|
||||
ring->wp = ring->base;
|
||||
cmd_ctxt->rp = cmd_ctxt->rbase;
|
||||
cmd_ctxt->wp = cmd_ctxt->rbase;
|
||||
}
|
||||
|
||||
mhi_event = mhi_cntrl->mhi_event;
|
||||
er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
|
||||
for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
|
||||
mhi_event++) {
|
||||
struct mhi_ring *ring = &mhi_event->ring;
|
||||
|
||||
/* Skip offload events */
|
||||
if (mhi_event->offload_ev)
|
||||
continue;
|
||||
|
||||
ring->rp = ring->base;
|
||||
ring->wp = ring->base;
|
||||
er_ctxt->rp = er_ctxt->rbase;
|
||||
er_ctxt->wp = er_ctxt->rbase;
|
||||
}
|
||||
|
||||
mhi_ready_state_transition(mhi_cntrl);
|
||||
|
||||
exit_sys_error_transition:
|
||||
dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
|
||||
to_mhi_pm_state_str(mhi_cntrl->pm_state),
|
||||
TO_MHI_STATE_STR(mhi_cntrl->dev_state));
|
||||
|
@ -597,7 +698,7 @@ int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
|
|||
list_add_tail(&item->node, &mhi_cntrl->transition_list);
|
||||
spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);
|
||||
|
||||
schedule_work(&mhi_cntrl->st_worker);
|
||||
queue_work(mhi_cntrl->hiprio_wq, &mhi_cntrl->st_worker);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -662,12 +763,10 @@ void mhi_pm_st_worker(struct work_struct *work)
|
|||
mhi_ready_state_transition(mhi_cntrl);
|
||||
break;
|
||||
case DEV_ST_TRANSITION_SYS_ERR:
|
||||
mhi_pm_disable_transition
|
||||
(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
|
||||
mhi_pm_sys_error_transition(mhi_cntrl);
|
||||
break;
|
||||
case DEV_ST_TRANSITION_DISABLE:
|
||||
mhi_pm_disable_transition
|
||||
(mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS);
|
||||
mhi_pm_disable_transition(mhi_cntrl);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
|
@ -827,6 +926,10 @@ int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
|
|||
|
||||
/* Wake up the device */
|
||||
read_lock_bh(&mhi_cntrl->pm_lock);
|
||||
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
|
||||
read_unlock_bh(&mhi_cntrl->pm_lock);
|
||||
return -EIO;
|
||||
}
|
||||
mhi_cntrl->wake_get(mhi_cntrl, true);
|
||||
if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
|
||||
mhi_trigger_resume(mhi_cntrl);
|
||||
|
@ -918,9 +1021,6 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
|
|||
|
||||
dev_info(dev, "Requested to power ON\n");
|
||||
|
||||
if (mhi_cntrl->nr_irqs < 1)
|
||||
return -EINVAL;
|
||||
|
||||
/* Supply default wake routines if not provided by controller driver */
|
||||
if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
|
||||
!mhi_cntrl->wake_toggle) {
|
||||
|
@ -1033,29 +1133,39 @@ EXPORT_SYMBOL_GPL(mhi_async_power_up);
|
|||
|
||||
void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
|
||||
{
|
||||
enum mhi_pm_state cur_state;
|
||||
enum mhi_pm_state cur_state, transition_state;
|
||||
struct device *dev = &mhi_cntrl->mhi_dev->dev;
|
||||
|
||||
/* If it's not a graceful shutdown, force MHI to linkdown state */
|
||||
if (!graceful) {
|
||||
mutex_lock(&mhi_cntrl->pm_mutex);
|
||||
write_lock_irq(&mhi_cntrl->pm_lock);
|
||||
cur_state = mhi_tryset_pm_state(mhi_cntrl,
|
||||
MHI_PM_LD_ERR_FATAL_DETECT);
|
||||
write_unlock_irq(&mhi_cntrl->pm_lock);
|
||||
mutex_unlock(&mhi_cntrl->pm_mutex);
|
||||
if (cur_state != MHI_PM_LD_ERR_FATAL_DETECT)
|
||||
dev_dbg(dev, "Failed to move to state: %s from: %s\n",
|
||||
to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT),
|
||||
to_mhi_pm_state_str(mhi_cntrl->pm_state));
|
||||
transition_state = (graceful) ? MHI_PM_SHUTDOWN_PROCESS :
|
||||
MHI_PM_LD_ERR_FATAL_DETECT;
|
||||
|
||||
mutex_lock(&mhi_cntrl->pm_mutex);
|
||||
write_lock_irq(&mhi_cntrl->pm_lock);
|
||||
cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
|
||||
if (cur_state != transition_state) {
|
||||
dev_err(dev, "Failed to move to state: %s from: %s\n",
|
||||
to_mhi_pm_state_str(transition_state),
|
||||
to_mhi_pm_state_str(mhi_cntrl->pm_state));
|
||||
/* Force link down or error fatal detected state */
|
||||
mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
|
||||
}
|
||||
|
||||
/* mark device inactive to avoid any further host processing */
|
||||
mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
|
||||
mhi_cntrl->dev_state = MHI_STATE_RESET;
|
||||
|
||||
wake_up_all(&mhi_cntrl->state_event);
|
||||
|
||||
write_unlock_irq(&mhi_cntrl->pm_lock);
|
||||
mutex_unlock(&mhi_cntrl->pm_mutex);
|
||||
|
||||
mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE);
|
||||
|
||||
/* Wait for shutdown to complete */
|
||||
flush_work(&mhi_cntrl->st_worker);
|
||||
|
||||
mhi_deinit_free_irq(mhi_cntrl);
|
||||
free_irq(mhi_cntrl->irq[0], mhi_cntrl);
|
||||
|
||||
if (!mhi_cntrl->pre_init) {
|
||||
/* Free all allocated resources */
|
||||
|
|
|
@ -0,0 +1,345 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * MHI PCI driver - MHI over PCI controller driver
 *
 * This module is a generic driver for registering MHI-over-PCI devices,
 * such as PCIe QCOM modems.
 *
 * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
 */

#include <linux/device.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/pci.h>

#define MHI_PCI_DEFAULT_BAR_NUM 0

/**
 * struct mhi_pci_dev_info - MHI PCI device specific information
 * @config: MHI controller configuration
 * @name: name of the PCI module
 * @fw: firmware path (if any)
 * @edl: emergency download mode firmware path (if any)
 * @bar_num: PCI base address register to use for MHI MMIO register space
 * @dma_data_width: DMA transfer word size (32 or 64 bits)
 */
struct mhi_pci_dev_info {
        const struct mhi_controller_config *config;
        const char *name;
        const char *fw;
        const char *edl;
        unsigned int bar_num;
        unsigned int dma_data_width;
};

#define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \
        { \
                .num = ch_num, \
                .name = ch_name, \
                .num_elements = el_count, \
                .event_ring = ev_ring, \
                .dir = DMA_TO_DEVICE, \
                .ee_mask = BIT(MHI_EE_AMSS), \
                .pollcfg = 0, \
                .doorbell = MHI_DB_BRST_DISABLE, \
                .lpm_notify = false, \
                .offload_channel = false, \
                .doorbell_mode_switch = false, \
        } \

#define MHI_CHANNEL_CONFIG_DL(ch_num, ch_name, el_count, ev_ring) \
        { \
                .num = ch_num, \
                .name = ch_name, \
                .num_elements = el_count, \
                .event_ring = ev_ring, \
                .dir = DMA_FROM_DEVICE, \
                .ee_mask = BIT(MHI_EE_AMSS), \
                .pollcfg = 0, \
                .doorbell = MHI_DB_BRST_DISABLE, \
                .lpm_notify = false, \
                .offload_channel = false, \
                .doorbell_mode_switch = false, \
        }

#define MHI_EVENT_CONFIG_CTRL(ev_ring) \
        { \
                .num_elements = 64, \
                .irq_moderation_ms = 0, \
                .irq = (ev_ring) + 1, \
                .priority = 1, \
                .mode = MHI_DB_BRST_DISABLE, \
                .data_type = MHI_ER_CTRL, \
                .hardware_event = false, \
                .client_managed = false, \
                .offload_channel = false, \
        }

#define MHI_EVENT_CONFIG_DATA(ev_ring) \
        { \
                .num_elements = 128, \
                .irq_moderation_ms = 5, \
                .irq = (ev_ring) + 1, \
                .priority = 1, \
                .mode = MHI_DB_BRST_DISABLE, \
                .data_type = MHI_ER_DATA, \
                .hardware_event = false, \
                .client_managed = false, \
                .offload_channel = false, \
        }

#define MHI_EVENT_CONFIG_HW_DATA(ev_ring, ch_num) \
        { \
                .num_elements = 128, \
                .irq_moderation_ms = 5, \
                .irq = (ev_ring) + 1, \
                .priority = 1, \
                .mode = MHI_DB_BRST_DISABLE, \
                .data_type = MHI_ER_DATA, \
                .hardware_event = true, \
                .client_managed = false, \
                .offload_channel = false, \
                .channel = ch_num, \
        }

static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
        MHI_CHANNEL_CONFIG_UL(12, "MBIM", 4, 0),
        MHI_CHANNEL_CONFIG_DL(13, "MBIM", 4, 0),
        MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0),
        MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0),
        MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0),
        MHI_CHANNEL_CONFIG_DL(21, "IPCR", 8, 0),
        MHI_CHANNEL_CONFIG_UL(100, "IP_HW0", 128, 1),
        MHI_CHANNEL_CONFIG_DL(101, "IP_HW0", 128, 2),
};

static const struct mhi_event_config modem_qcom_v1_mhi_events[] = {
        /* first ring is control+data ring */
        MHI_EVENT_CONFIG_CTRL(0),
        /* Hardware channels request dedicated hardware event rings */
        MHI_EVENT_CONFIG_HW_DATA(1, 100),
        MHI_EVENT_CONFIG_HW_DATA(2, 101)
};

static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
        .max_channels = 128,
        .timeout_ms = 5000,
        .num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels),
        .ch_cfg = modem_qcom_v1_mhi_channels,
        .num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events),
        .event_cfg = modem_qcom_v1_mhi_events,
};

static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
        .name = "qcom-sdx55m",
        .fw = "qcom/sdx55m/sbl1.mbn",
        .edl = "qcom/sdx55m/edl.mbn",
        .config = &modem_qcom_v1_mhiv_config,
        .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
        .dma_data_width = 32
};

static const struct pci_device_id mhi_pci_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306),
                .driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info },
        { }
};
MODULE_DEVICE_TABLE(pci, mhi_pci_id_table);

static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl,
                            void __iomem *addr, u32 *out)
{
        *out = readl(addr);
        return 0;
}

static void mhi_pci_write_reg(struct mhi_controller *mhi_cntrl,
                              void __iomem *addr, u32 val)
{
        writel(val, addr);
}

static void mhi_pci_status_cb(struct mhi_controller *mhi_cntrl,
                              enum mhi_callback cb)
{
        /* Nothing to do for now */
}

static int mhi_pci_claim(struct mhi_controller *mhi_cntrl,
                         unsigned int bar_num, u64 dma_mask)
{
        struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
        int err;

        err = pci_assign_resource(pdev, bar_num);
        if (err)
                return err;

        err = pcim_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "failed to enable pci device: %d\n", err);
                return err;
        }

        err = pcim_iomap_regions(pdev, 1 << bar_num, pci_name(pdev));
        if (err) {
                dev_err(&pdev->dev, "failed to map pci region: %d\n", err);
                return err;
        }
        mhi_cntrl->regs = pcim_iomap_table(pdev)[bar_num];

        err = pci_set_dma_mask(pdev, dma_mask);
        if (err) {
                dev_err(&pdev->dev, "Cannot set proper DMA mask\n");
                return err;
        }

        err = pci_set_consistent_dma_mask(pdev, dma_mask);
        if (err) {
                dev_err(&pdev->dev, "set consistent dma mask failed\n");
                return err;
        }

        pci_set_master(pdev);

        return 0;
}

static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl,
                            const struct mhi_controller_config *mhi_cntrl_config)
{
        struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
        int nr_vectors, i;
        int *irq;

        /*
         * Alloc one MSI vector for BHI + one vector per event ring, ideally...
         * No explicit pci_free_irq_vectors required, done by pcim_release.
         */
        mhi_cntrl->nr_irqs = 1 + mhi_cntrl_config->num_events;

        nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSI);
        if (nr_vectors < 0) {
                dev_err(&pdev->dev, "Error allocating MSI vectors %d\n",
                        nr_vectors);
                return nr_vectors;
        }

        if (nr_vectors < mhi_cntrl->nr_irqs) {
                dev_warn(&pdev->dev, "Not enough MSI vectors (%d/%d), use shared MSI\n",
                         nr_vectors, mhi_cntrl_config->num_events);
        }

        irq = devm_kcalloc(&pdev->dev, mhi_cntrl->nr_irqs, sizeof(int), GFP_KERNEL);
        if (!irq)
                return -ENOMEM;

        for (i = 0; i < mhi_cntrl->nr_irqs; i++) {
                int vector = i >= nr_vectors ? (nr_vectors - 1) : i;

                irq[i] = pci_irq_vector(pdev, vector);
        }

        mhi_cntrl->irq = irq;

        return 0;
}

static int mhi_pci_runtime_get(struct mhi_controller *mhi_cntrl)
{
        /* no PM for now */
        return 0;
}

static void mhi_pci_runtime_put(struct mhi_controller *mhi_cntrl)
{
        /* no PM for now */
}

static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        const struct mhi_pci_dev_info *info = (struct mhi_pci_dev_info *) id->driver_data;
        const struct mhi_controller_config *mhi_cntrl_config;
        struct mhi_controller *mhi_cntrl;
        int err;

        dev_dbg(&pdev->dev, "MHI PCI device found: %s\n", info->name);

        mhi_cntrl = mhi_alloc_controller();
        if (!mhi_cntrl)
                return -ENOMEM;

        mhi_cntrl_config = info->config;
        mhi_cntrl->cntrl_dev = &pdev->dev;
        mhi_cntrl->iova_start = 0;
        mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(info->dma_data_width);
        mhi_cntrl->fw_image = info->fw;
        mhi_cntrl->edl_image = info->edl;

        mhi_cntrl->read_reg = mhi_pci_read_reg;
        mhi_cntrl->write_reg = mhi_pci_write_reg;
        mhi_cntrl->status_cb = mhi_pci_status_cb;
        mhi_cntrl->runtime_get = mhi_pci_runtime_get;
        mhi_cntrl->runtime_put = mhi_pci_runtime_put;

        err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
        if (err)
                goto err_release;

        err = mhi_pci_get_irqs(mhi_cntrl, mhi_cntrl_config);
        if (err)
                goto err_release;

        pci_set_drvdata(pdev, mhi_cntrl);

        err = mhi_register_controller(mhi_cntrl, mhi_cntrl_config);
        if (err)
                goto err_release;

        /* MHI bus does not power up the controller by default */
        err = mhi_prepare_for_power_up(mhi_cntrl);
        if (err) {
                dev_err(&pdev->dev, "failed to prepare MHI controller\n");
                goto err_unregister;
        }

        err = mhi_sync_power_up(mhi_cntrl);
        if (err) {
                dev_err(&pdev->dev, "failed to power up MHI controller\n");
                goto err_unprepare;
        }

        return 0;

err_unprepare:
        mhi_unprepare_after_power_down(mhi_cntrl);
err_unregister:
        mhi_unregister_controller(mhi_cntrl);
err_release:
        mhi_free_controller(mhi_cntrl);

        return err;
}

static void mhi_pci_remove(struct pci_dev *pdev)
{
        struct mhi_controller *mhi_cntrl = pci_get_drvdata(pdev);

        mhi_power_down(mhi_cntrl, true);
        mhi_unprepare_after_power_down(mhi_cntrl);
        mhi_unregister_controller(mhi_cntrl);
        mhi_free_controller(mhi_cntrl);
}

static struct pci_driver mhi_pci_driver = {
        .name = "mhi-pci-generic",
        .id_table = mhi_pci_id_table,
        .probe = mhi_pci_probe,
        .remove = mhi_pci_remove
};
module_pci_driver(mhi_pci_driver);

MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
MODULE_DESCRIPTION("Modem Host Interface (MHI) PCI controller driver");
MODULE_LICENSE("GPL");
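
The PCI glue above only registers the transport; traffic is handled by MHI client drivers that bind, by name, to the channels declared in modem_qcom_v1_mhi_channels. The following is a rough, illustrative sketch of such a client for the "IPCR" channel pair, written against the in-kernel MHI client API (struct mhi_driver, mhi_prepare_for_transfer(), mhi_queue_buf()); the driver name, buffer size and behaviour are invented for the example and are not part of this series.

#include <linux/device.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/slab.h>

#define DEMO_RX_SIZE 4096       /* arbitrary receive buffer size */

/* Downlink completion: the device filled a buffer we queued earlier. */
static void demo_dl_xfer_cb(struct mhi_device *mhi_dev,
                            struct mhi_result *result)
{
        dev_info(&mhi_dev->dev, "received %zu bytes (status %d)\n",
                 result->bytes_xferd, result->transaction_status);
        kfree(result->buf_addr);
}

/* Uplink completion: just release the buffer we transmitted. */
static void demo_ul_xfer_cb(struct mhi_device *mhi_dev,
                            struct mhi_result *result)
{
        kfree(result->buf_addr);
}

static int demo_probe(struct mhi_device *mhi_dev,
                      const struct mhi_device_id *id)
{
        void *buf;
        int ret;

        /* Start the uplink/downlink channel pair for this device. */
        ret = mhi_prepare_for_transfer(mhi_dev);
        if (ret)
                return ret;

        /* Pre-queue one receive buffer on the downlink channel. */
        buf = kmalloc(DEMO_RX_SIZE, GFP_KERNEL);
        if (!buf) {
                mhi_unprepare_from_transfer(mhi_dev);
                return -ENOMEM;
        }

        ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, buf, DEMO_RX_SIZE, MHI_EOT);
        if (ret) {
                kfree(buf);
                mhi_unprepare_from_transfer(mhi_dev);
        }

        return ret;
}

static void demo_remove(struct mhi_device *mhi_dev)
{
        /* Note: a still-queued RX buffer is leaked in this bare sketch. */
        mhi_unprepare_from_transfer(mhi_dev);
}

/* Matches by channel name against the controller's channel table. */
static const struct mhi_device_id demo_id_table[] = {
        { .chan = "IPCR" },
        {}
};
MODULE_DEVICE_TABLE(mhi, demo_id_table);

static struct mhi_driver demo_mhi_driver = {
        .id_table = demo_id_table,
        .probe = demo_probe,
        .remove = demo_remove,
        .ul_xfer_cb = demo_ul_xfer_cb,
        .dl_xfer_cb = demo_dl_xfer_cb,
        .driver = {
                .name = "mhi_demo_client",
        },
};
module_mhi_driver(demo_mhi_driver);

MODULE_LICENSE("GPL");
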
@ -622,7 +622,6 @@ static int lp_do_ioctl(unsigned int minor, unsigned int cmd,
                break;
        case LPSETIRQ:
                return -EINVAL;
                break;
        case LPGETIRQ:
                if (copy_to_user(argp, &LP_IRQ(minor),
                                sizeof(int)))
@ -60,7 +60,7 @@ static DEFINE_MUTEX(misc_mtx);
/*
 * Assigned numbers, used for dynamic minors
 */
#define DYNAMIC_MINORS 64 /* like dynamic majors */
#define DYNAMIC_MINORS 128 /* like dynamic majors */
static DECLARE_BITMAP(misc_minors, DYNAMIC_MINORS);

#ifdef CONFIG_PROC_FS
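
The bump matters because every driver that registers a struct miscdevice with MISC_DYNAMIC_MINOR consumes one slot from this misc_minors bitmap, so the value above is the hard cap on dynamically numbered misc devices. A minimal, purely illustrative registration of such a device might look like this (the driver name and fops are made up for the sketch):

#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

static int demo_open(struct inode *inode, struct file *file)
{
        return 0;
}

static const struct file_operations demo_fops = {
        .owner = THIS_MODULE,
        .open = demo_open,
};

/* MISC_DYNAMIC_MINOR asks misc_register() for one of the DYNAMIC_MINORS slots. */
static struct miscdevice demo_miscdev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "demo-misc",
        .fops = &demo_fops,
};
module_misc_device(demo_miscdev);

MODULE_LICENSE("GPL");
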
@ -403,7 +403,6 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,

        default:
                return -ENOTTY;
                break;
        } /* switch */

        PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl, exit retval %x\n", retval);
@ -186,4 +186,12 @@ config EXTCON_USBC_CROS_EC
          Say Y here to enable USB Type C cable detection extcon support when
          using Chrome OS EC based USB Type-C ports.

config EXTCON_USBC_TUSB320
        tristate "TI TUSB320 USB-C extcon support"
        depends on I2C
        select REGMAP_I2C
        help
          Say Y here to enable support for USB Type C cable detection extcon
          support using a TUSB320.

endif
@ -25,3 +25,4 @@ obj-$(CONFIG_EXTCON_RT8973A) += extcon-rt8973a.o
obj-$(CONFIG_EXTCON_SM5502) += extcon-sm5502.o
obj-$(CONFIG_EXTCON_USB_GPIO) += extcon-usb-gpio.o
obj-$(CONFIG_EXTCON_USBC_CROS_EC) += extcon-usbc-cros-ec.o
obj-$(CONFIG_EXTCON_USBC_TUSB320) += extcon-usbc-tusb320.o
@ -364,6 +364,7 @@ MODULE_DEVICE_TABLE(i2c, fsa9480_id);
static const struct of_device_id fsa9480_of_match[] = {
        { .compatible = "fcs,fsa9480", },
        { .compatible = "fcs,fsa880", },
        { .compatible = "ti,tsu6111", },
        { },
};
MODULE_DEVICE_TABLE(of, fsa9480_of_match);
@ -1277,4 +1277,4 @@ module_platform_driver(max77693_muic_driver);
MODULE_DESCRIPTION("Maxim MAX77693 Extcon driver");
MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:extcon-max77693");
MODULE_ALIAS("platform:max77693-muic");
@ -0,0 +1,184 @@
// SPDX-License-Identifier: GPL-2.0
/**
 * drivers/extcon/extcon-tusb320.c - TUSB320 extcon driver
 *
 * Copyright (C) 2020 National Instruments Corporation
 * Author: Michael Auchter <michael.auchter@ni.com>
 */

#include <linux/extcon-provider.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regmap.h>

#define TUSB320_REG9 0x9
#define TUSB320_REG9_ATTACHED_STATE_SHIFT 6
#define TUSB320_REG9_ATTACHED_STATE_MASK 0x3
#define TUSB320_REG9_CABLE_DIRECTION BIT(5)
#define TUSB320_REG9_INTERRUPT_STATUS BIT(4)
#define TUSB320_ATTACHED_STATE_NONE 0x0
#define TUSB320_ATTACHED_STATE_DFP 0x1
#define TUSB320_ATTACHED_STATE_UFP 0x2
#define TUSB320_ATTACHED_STATE_ACC 0x3

struct tusb320_priv {
        struct device *dev;
        struct regmap *regmap;
        struct extcon_dev *edev;
};

static const char * const tusb_attached_states[] = {
        [TUSB320_ATTACHED_STATE_NONE] = "not attached",
        [TUSB320_ATTACHED_STATE_DFP] = "downstream facing port",
        [TUSB320_ATTACHED_STATE_UFP] = "upstream facing port",
        [TUSB320_ATTACHED_STATE_ACC] = "accessory",
};

static const unsigned int tusb320_extcon_cable[] = {
        EXTCON_USB,
        EXTCON_USB_HOST,
        EXTCON_NONE,
};

static int tusb320_check_signature(struct tusb320_priv *priv)
{
        static const char sig[] = { '\0', 'T', 'U', 'S', 'B', '3', '2', '0' };
        unsigned val;
        int i, ret;

        for (i = 0; i < sizeof(sig); i++) {
                ret = regmap_read(priv->regmap, sizeof(sig) - 1 - i, &val);
                if (ret < 0)
                        return ret;
                if (val != sig[i]) {
                        dev_err(priv->dev, "signature mismatch!\n");
                        return -ENODEV;
                }
        }

        return 0;
}

static irqreturn_t tusb320_irq_handler(int irq, void *dev_id)
{
        struct tusb320_priv *priv = dev_id;
        int state, polarity;
        unsigned reg;

        if (regmap_read(priv->regmap, TUSB320_REG9, &reg)) {
                dev_err(priv->dev, "error during i2c read!\n");
                return IRQ_NONE;
        }

        if (!(reg & TUSB320_REG9_INTERRUPT_STATUS))
                return IRQ_NONE;

        state = (reg >> TUSB320_REG9_ATTACHED_STATE_SHIFT) &
                TUSB320_REG9_ATTACHED_STATE_MASK;
        polarity = !!(reg & TUSB320_REG9_CABLE_DIRECTION);

        dev_dbg(priv->dev, "attached state: %s, polarity: %d\n",
                tusb_attached_states[state], polarity);

        extcon_set_state(priv->edev, EXTCON_USB,
                         state == TUSB320_ATTACHED_STATE_UFP);
        extcon_set_state(priv->edev, EXTCON_USB_HOST,
                         state == TUSB320_ATTACHED_STATE_DFP);
        extcon_set_property(priv->edev, EXTCON_USB,
                            EXTCON_PROP_USB_TYPEC_POLARITY,
                            (union extcon_property_value)polarity);
        extcon_set_property(priv->edev, EXTCON_USB_HOST,
                            EXTCON_PROP_USB_TYPEC_POLARITY,
                            (union extcon_property_value)polarity);
        extcon_sync(priv->edev, EXTCON_USB);
        extcon_sync(priv->edev, EXTCON_USB_HOST);

        regmap_write(priv->regmap, TUSB320_REG9, reg);

        return IRQ_HANDLED;
}

static const struct regmap_config tusb320_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
};

static int tusb320_extcon_probe(struct i2c_client *client,
                                const struct i2c_device_id *id)
{
        struct tusb320_priv *priv;
        int ret;

        priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
        priv->dev = &client->dev;

        priv->regmap = devm_regmap_init_i2c(client, &tusb320_regmap_config);
        if (IS_ERR(priv->regmap))
                return PTR_ERR(priv->regmap);

        ret = tusb320_check_signature(priv);
        if (ret)
                return ret;

        priv->edev = devm_extcon_dev_allocate(priv->dev, tusb320_extcon_cable);
        if (IS_ERR(priv->edev)) {
                dev_err(priv->dev, "failed to allocate extcon device\n");
                return PTR_ERR(priv->edev);
        }

        ret = devm_extcon_dev_register(priv->dev, priv->edev);
        if (ret < 0) {
                dev_err(priv->dev, "failed to register extcon device\n");
                return ret;
        }

        extcon_set_property_capability(priv->edev, EXTCON_USB,
                                       EXTCON_PROP_USB_TYPEC_POLARITY);
        extcon_set_property_capability(priv->edev, EXTCON_USB_HOST,
                                       EXTCON_PROP_USB_TYPEC_POLARITY);

        /* update initial state */
        tusb320_irq_handler(client->irq, priv);

        ret = devm_request_threaded_irq(priv->dev, client->irq, NULL,
                                        tusb320_irq_handler,
                                        IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                        client->name, priv);

        return ret;
}

static const struct of_device_id tusb320_extcon_dt_match[] = {
        { .compatible = "ti,tusb320", },
        { }
};
MODULE_DEVICE_TABLE(of, tusb320_extcon_dt_match);

static struct i2c_driver tusb320_extcon_driver = {
        .probe = tusb320_extcon_probe,
        .driver = {
                .name = "extcon-tusb320",
                .of_match_table = tusb320_extcon_dt_match,
        },
};

static int __init tusb320_init(void)
{
        return i2c_add_driver(&tusb320_extcon_driver);
}
subsys_initcall(tusb320_init);

static void __exit tusb320_exit(void)
{
        i2c_del_driver(&tusb320_extcon_driver);
}
module_exit(tusb320_exit);

MODULE_AUTHOR("Michael Auchter <michael.auchter@ni.com>");
MODULE_DESCRIPTION("TI TUSB320 extcon driver");
MODULE_LICENSE("GPL v2");
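
The driver above is only the provider side: it publishes EXTCON_USB / EXTCON_USB_HOST plus the Type-C polarity property. For context, a consumer reads that state back through the generic extcon API, roughly as in the sketch below; the lookup via an "extcon" phandle and all names here are illustrative assumptions, not part of this series.

#include <linux/extcon.h>
#include <linux/module.h>
#include <linux/platform_device.h>

/* Hypothetical consumer: query host-cable state published by the TUSB320. */
static int demo_usb_role_probe(struct platform_device *pdev)
{
        union extcon_property_value polarity;
        struct extcon_dev *edev;
        int attached, ret;

        /* Assumes the consumer node carries an "extcon" phandle to the TUSB320. */
        edev = extcon_get_edev_by_phandle(&pdev->dev, 0);
        if (IS_ERR(edev))
                return PTR_ERR(edev);

        attached = extcon_get_state(edev, EXTCON_USB_HOST);
        if (attached < 0)
                return attached;

        ret = extcon_get_property(edev, EXTCON_USB_HOST,
                                  EXTCON_PROP_USB_TYPEC_POLARITY, &polarity);
        if (ret)
                return ret;

        dev_info(&pdev->dev, "host cable %sattached, CC polarity %d\n",
                 attached ? "" : "not ", polarity.intval);

        return 0;
}

static struct platform_driver demo_usb_role_driver = {
        .probe = demo_usb_role_probe,
        .driver = {
                .name = "demo-usb-role",
        },
};
module_platform_driver(demo_usb_role_driver);

MODULE_LICENSE("GPL");
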
@ -51,7 +51,7 @@ config ARM_SCPI_PROTOCOL
          provides a mechanism for inter-processor communication between SCP
          and AP.

          SCP controls most of the power managament on the Application
          SCP controls most of the power management on the Application
          Processors. It offers control and management of: the core/cluster
          power states, various power domain DVFS including the core/cluster,
          certain system clocks configuration, thermal sensors and many
@ -17,7 +17,6 @@
|
|||
#include <linux/string.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/dmapool.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/ioctl.h>
|
||||
|
@ -85,7 +84,6 @@
|
|||
struct gsmi_buf {
|
||||
u8 *start; /* start of buffer */
|
||||
size_t length; /* length of buffer */
|
||||
dma_addr_t handle; /* dma allocation handle */
|
||||
u32 address; /* physical address of buffer */
|
||||
};
|
||||
|
||||
|
@ -97,7 +95,7 @@ static struct gsmi_device {
|
|||
spinlock_t lock; /* serialize access to SMIs */
|
||||
u16 smi_cmd; /* SMI command port */
|
||||
int handshake_type; /* firmware handler interlock type */
|
||||
struct dma_pool *dma_pool; /* DMA buffer pool */
|
||||
struct kmem_cache *mem_pool; /* kmem cache for gsmi_buf allocations */
|
||||
} gsmi_dev;
|
||||
|
||||
/* Packed structures for communicating with the firmware */
|
||||
|
@ -157,8 +155,7 @@ static struct gsmi_buf *gsmi_buf_alloc(void)
|
|||
}
|
||||
|
||||
/* allocate buffer in 32bit address space */
|
||||
smibuf->start = dma_pool_alloc(gsmi_dev.dma_pool, GFP_KERNEL,
|
||||
&smibuf->handle);
|
||||
smibuf->start = kmem_cache_alloc(gsmi_dev.mem_pool, GFP_KERNEL);
|
||||
if (!smibuf->start) {
|
||||
printk(KERN_ERR "gsmi: failed to allocate name buffer\n");
|
||||
kfree(smibuf);
|
||||
|
@ -176,8 +173,7 @@ static void gsmi_buf_free(struct gsmi_buf *smibuf)
|
|||
{
|
||||
if (smibuf) {
|
||||
if (smibuf->start)
|
||||
dma_pool_free(gsmi_dev.dma_pool, smibuf->start,
|
||||
smibuf->handle);
|
||||
kmem_cache_free(gsmi_dev.mem_pool, smibuf->start);
|
||||
kfree(smibuf);
|
||||
}
|
||||
}
|
||||
|
@ -914,9 +910,20 @@ static __init int gsmi_init(void)
|
|||
spin_lock_init(&gsmi_dev.lock);
|
||||
|
||||
ret = -ENOMEM;
|
||||
gsmi_dev.dma_pool = dma_pool_create("gsmi", &gsmi_dev.pdev->dev,
|
||||
GSMI_BUF_SIZE, GSMI_BUF_ALIGN, 0);
|
||||
if (!gsmi_dev.dma_pool)
|
||||
|
||||
/*
|
||||
* SLAB cache is created using SLAB_CACHE_DMA32 to ensure that the
|
||||
* allocations for gsmi_buf come from the DMA32 memory zone. These
|
||||
* buffers have nothing to do with DMA. They are required for
|
||||
* communication with firmware executing in SMI mode which can only
|
||||
* access the bottom 4GiB of physical memory. Since DMA32 memory zone
|
||||
* guarantees allocation under the 4GiB boundary, this driver creates
|
||||
* a SLAB cache with SLAB_CACHE_DMA32 flag.
|
||||
*/
|
||||
gsmi_dev.mem_pool = kmem_cache_create("gsmi", GSMI_BUF_SIZE,
|
||||
GSMI_BUF_ALIGN,
|
||||
SLAB_CACHE_DMA32, NULL);
|
||||
if (!gsmi_dev.mem_pool)
|
||||
goto out_err;
|
||||
|
||||
/*
|
||||
|
@ -1032,7 +1039,7 @@ out_err:
|
|||
gsmi_buf_free(gsmi_dev.param_buf);
|
||||
gsmi_buf_free(gsmi_dev.data_buf);
|
||||
gsmi_buf_free(gsmi_dev.name_buf);
|
||||
dma_pool_destroy(gsmi_dev.dma_pool);
|
||||
kmem_cache_destroy(gsmi_dev.mem_pool);
|
||||
platform_device_unregister(gsmi_dev.pdev);
|
||||
pr_info("gsmi: failed to load: %d\n", ret);
|
||||
#ifdef CONFIG_PM
|
||||
|
@ -1057,7 +1064,7 @@ static void __exit gsmi_exit(void)
|
|||
gsmi_buf_free(gsmi_dev.param_buf);
|
||||
gsmi_buf_free(gsmi_dev.data_buf);
|
||||
gsmi_buf_free(gsmi_dev.name_buf);
|
||||
dma_pool_destroy(gsmi_dev.dma_pool);
|
||||
kmem_cache_destroy(gsmi_dev.mem_pool);
|
||||
platform_device_unregister(gsmi_dev.pdev);
|
||||
#ifdef CONFIG_PM
|
||||
platform_driver_unregister(&gsmi_driver_info);
|
||||
|
|
|
@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Defines interfaces for interacting wtih the Raspberry Pi firmware's
 * Defines interfaces for interacting with the Raspberry Pi firmware's
 * property channel.
 *
 * Copyright © 2015 Broadcom
@ -28,15 +28,6 @@ static int alt_pr_platform_probe(struct platform_device *pdev)
|
|||
return alt_pr_register(dev, reg_base);
|
||||
}
|
||||
|
||||
static int alt_pr_platform_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct device *dev = &pdev->dev;
|
||||
|
||||
alt_pr_unregister(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct of_device_id alt_pr_of_match[] = {
|
||||
{ .compatible = "altr,a10-pr-ip", },
|
||||
{},
|
||||
|
@ -46,7 +37,6 @@ MODULE_DEVICE_TABLE(of, alt_pr_of_match);
|
|||
|
||||
static struct platform_driver alt_pr_platform_driver = {
|
||||
.probe = alt_pr_platform_probe,
|
||||
.remove = alt_pr_platform_remove,
|
||||
.driver = {
|
||||
.name = "alt_a10_pr_ip",
|
||||
.of_match_table = alt_pr_of_match,
|
||||
|
|
|
@ -195,9 +195,7 @@ int alt_pr_register(struct device *dev, void __iomem *reg_base)
|
|||
if (!mgr)
|
||||
return -ENOMEM;
|
||||
|
||||
dev_set_drvdata(dev, mgr);
|
||||
|
||||
return fpga_mgr_register(mgr);
|
||||
return devm_fpga_mgr_register(dev, mgr);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(alt_pr_register);
|
||||
|
||||
|
|
|
@ -307,18 +307,7 @@ static int altera_ps_probe(struct spi_device *spi)
|
|||
if (!mgr)
|
||||
return -ENOMEM;
|
||||
|
||||
spi_set_drvdata(spi, mgr);
|
||||
|
||||
return fpga_mgr_register(mgr);
|
||||
}
|
||||
|
||||
static int altera_ps_remove(struct spi_device *spi)
|
||||
{
|
||||
struct fpga_manager *mgr = spi_get_drvdata(spi);
|
||||
|
||||
fpga_mgr_unregister(mgr);
|
||||
|
||||
return 0;
|
||||
return devm_fpga_mgr_register(&spi->dev, mgr);
|
||||
}
|
||||
|
||||
static const struct spi_device_id altera_ps_spi_ids[] = {
|
||||
|
@ -337,7 +326,6 @@ static struct spi_driver altera_ps_driver = {
|
|||
},
|
||||
.id_table = altera_ps_spi_ids,
|
||||
.probe = altera_ps_probe,
|
||||
.remove = altera_ps_remove,
|
||||
};
|
||||
|
||||
module_spi_driver(altera_ps_driver)
|
||||
|
|
|
@ -314,18 +314,8 @@ static int fme_mgr_probe(struct platform_device *pdev)
|
|||
return -ENOMEM;
|
||||
|
||||
mgr->compat_id = compat_id;
|
||||
platform_set_drvdata(pdev, mgr);
|
||||
|
||||
return fpga_mgr_register(mgr);
|
||||
}
|
||||
|
||||
static int fme_mgr_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct fpga_manager *mgr = platform_get_drvdata(pdev);
|
||||
|
||||
fpga_mgr_unregister(mgr);
|
||||
|
||||
return 0;
|
||||
return devm_fpga_mgr_register(dev, mgr);
|
||||
}
|
||||
|
||||
static struct platform_driver fme_mgr_driver = {
|
||||
|
@ -333,7 +323,6 @@ static struct platform_driver fme_mgr_driver = {
|
|||
.name = DFL_FPGA_FME_MGR,
|
||||
},
|
||||
.probe = fme_mgr_probe,
|
||||
.remove = fme_mgr_remove,
|
||||
};
|
||||
|
||||
module_platform_driver(fme_mgr_driver);
|
||||
|
|
|
@ -21,6 +21,10 @@
static DEFINE_IDA(fpga_mgr_ida);
static struct class *fpga_mgr_class;

struct fpga_mgr_devres {
        struct fpga_manager *mgr;
};

/**
 * fpga_image_info_alloc - Allocate a FPGA image info struct
 * @dev: owning device

@ -625,9 +629,9 @@ EXPORT_SYMBOL_GPL(fpga_mgr_free);

static void devm_fpga_mgr_release(struct device *dev, void *res)
{
        struct fpga_manager *mgr = *(struct fpga_manager **)res;
        struct fpga_mgr_devres *dr = res;

        fpga_mgr_free(mgr);
        fpga_mgr_free(dr->mgr);
}

/**

@ -651,21 +655,21 @@ struct fpga_manager *devm_fpga_mgr_create(struct device *dev, const char *name,
                                          const struct fpga_manager_ops *mops,
                                          void *priv)
{
        struct fpga_manager **ptr, *mgr;
        struct fpga_mgr_devres *dr;

        ptr = devres_alloc(devm_fpga_mgr_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
        dr = devres_alloc(devm_fpga_mgr_release, sizeof(*dr), GFP_KERNEL);
        if (!dr)
                return NULL;

        mgr = fpga_mgr_create(dev, name, mops, priv);
        if (!mgr) {
                devres_free(ptr);
        } else {
                *ptr = mgr;
                devres_add(dev, ptr);
        dr->mgr = fpga_mgr_create(dev, name, mops, priv);
        if (!dr->mgr) {
                devres_free(dr);
                return NULL;
        }

        return mgr;
        devres_add(dev, dr);

        return dr->mgr;
}
EXPORT_SYMBOL_GPL(devm_fpga_mgr_create);

@ -722,6 +726,59 @@ void fpga_mgr_unregister(struct fpga_manager *mgr)
}
EXPORT_SYMBOL_GPL(fpga_mgr_unregister);

static int fpga_mgr_devres_match(struct device *dev, void *res,
                                 void *match_data)
{
        struct fpga_mgr_devres *dr = res;

        return match_data == dr->mgr;
}

static void devm_fpga_mgr_unregister(struct device *dev, void *res)
{
        struct fpga_mgr_devres *dr = res;

        fpga_mgr_unregister(dr->mgr);
}

/**
 * devm_fpga_mgr_register - resource managed variant of fpga_mgr_register()
 * @dev: managing device for this FPGA manager
 * @mgr: fpga manager struct
 *
 * This is the devres variant of fpga_mgr_register() for which the unregister
 * function will be called automatically when the managing device is detached.
 */
int devm_fpga_mgr_register(struct device *dev, struct fpga_manager *mgr)
{
        struct fpga_mgr_devres *dr;
        int ret;

        /*
         * Make sure that the struct fpga_manager * that is passed in is
         * managed itself.
         */
        if (WARN_ON(!devres_find(dev, devm_fpga_mgr_release,
                                 fpga_mgr_devres_match, mgr)))
                return -EINVAL;

        dr = devres_alloc(devm_fpga_mgr_unregister, sizeof(*dr), GFP_KERNEL);
        if (!dr)
                return -ENOMEM;

        ret = fpga_mgr_register(mgr);
        if (ret) {
                devres_free(dr);
                return ret;
        }

        dr->mgr = mgr;
        devres_add(dev, dr);

        return 0;
}
EXPORT_SYMBOL_GPL(devm_fpga_mgr_register);

static void fpga_mgr_dev_release(struct device *dev)
{
}
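
The kernel-doc above spells out the intended pattern: allocate the manager with devm_fpga_mgr_create() and then hand it to devm_fpga_mgr_register(), so that both unregistration and freeing happen automatically on detach, which is exactly the conversion applied to the low-level drivers later in this pull. A minimal illustrative probe following that pattern (the ops, names and private struct are placeholders, not a real device driver):

#include <linux/fpga/fpga-mgr.h>
#include <linux/module.h>
#include <linux/platform_device.h>

struct demo_fpga_priv {
        void __iomem *base;     /* placeholder for real device state */
};

static enum fpga_mgr_states demo_state(struct fpga_manager *mgr)
{
        return FPGA_MGR_STATE_UNKNOWN;
}

static int demo_write_init(struct fpga_manager *mgr,
                           struct fpga_image_info *info,
                           const char *buf, size_t count)
{
        return 0;       /* put the device into programming mode here */
}

static int demo_write(struct fpga_manager *mgr, const char *buf, size_t count)
{
        return 0;       /* push bitstream data to the device here */
}

static int demo_write_complete(struct fpga_manager *mgr,
                               struct fpga_image_info *info)
{
        return 0;       /* wait for the device to signal configuration done */
}

static const struct fpga_manager_ops demo_fpga_ops = {
        .state = demo_state,
        .write_init = demo_write_init,
        .write = demo_write,
        .write_complete = demo_write_complete,
};

static int demo_fpga_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct demo_fpga_priv *priv;
        struct fpga_manager *mgr;

        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        /*
         * Manager allocation and registration are both device-managed,
         * so the driver needs no .remove handling for the FPGA manager.
         */
        mgr = devm_fpga_mgr_create(dev, "demo fpga manager",
                                   &demo_fpga_ops, priv);
        if (!mgr)
                return -ENOMEM;

        return devm_fpga_mgr_register(dev, mgr);
}

static struct platform_driver demo_fpga_driver = {
        .probe = demo_fpga_probe,
        .driver = {
                .name = "demo-fpga-mgr",
        },
};
module_platform_driver(demo_fpga_driver);

MODULE_LICENSE("GPL");
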
@ -183,18 +183,7 @@ static int ice40_fpga_probe(struct spi_device *spi)
|
|||
if (!mgr)
|
||||
return -ENOMEM;
|
||||
|
||||
spi_set_drvdata(spi, mgr);
|
||||
|
||||
return fpga_mgr_register(mgr);
|
||||
}
|
||||
|
||||
static int ice40_fpga_remove(struct spi_device *spi)
|
||||
{
|
||||
struct fpga_manager *mgr = spi_get_drvdata(spi);
|
||||
|
||||
fpga_mgr_unregister(mgr);
|
||||
|
||||
return 0;
|
||||
return devm_fpga_mgr_register(dev, mgr);
|
||||
}
|
||||
|
||||
static const struct of_device_id ice40_fpga_of_match[] = {
|
||||
|
@ -205,7 +194,6 @@ MODULE_DEVICE_TABLE(of, ice40_fpga_of_match);
|
|||
|
||||
static struct spi_driver ice40_fpga_driver = {
|
||||
.probe = ice40_fpga_probe,
|
||||
.remove = ice40_fpga_remove,
|
||||
.driver = {
|
||||
.name = "ice40spi",
|
||||
.of_match_table = of_match_ptr(ice40_fpga_of_match),
|
||||
|
|
|
@ -371,18 +371,7 @@ static int machxo2_spi_probe(struct spi_device *spi)
|
|||
if (!mgr)
|
||||
return -ENOMEM;
|
||||
|
||||
spi_set_drvdata(spi, mgr);
|
||||
|
||||
return fpga_mgr_register(mgr);
|
||||
}
|
||||
|
||||
static int machxo2_spi_remove(struct spi_device *spi)
|
||||
{
|
||||
struct fpga_manager *mgr = spi_get_drvdata(spi);
|
||||
|
||||
fpga_mgr_unregister(mgr);
|
||||
|
||||
return 0;
|
||||
return devm_fpga_mgr_register(dev, mgr);
|
||||
}
|
||||
|
||||
static const struct of_device_id of_match[] = {
|
||||
|
@ -403,7 +392,6 @@ static struct spi_driver machxo2_spi_driver = {
|
|||
.of_match_table = of_match_ptr(of_match),
|
||||
},
|
||||
.probe = machxo2_spi_probe,
|
||||
.remove = machxo2_spi_remove,
|
||||
.id_table = lattice_ids,
|
||||
};
|
||||
|
||||
|
|
|
@ -576,18 +576,7 @@ static int socfpga_fpga_probe(struct platform_device *pdev)
|
|||
if (!mgr)
|
||||
return -ENOMEM;
|
||||
|
||||
platform_set_drvdata(pdev, mgr);
|
||||
|
||||
return fpga_mgr_register(mgr);
|
||||
}
|
||||
|
||||
static int socfpga_fpga_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct fpga_manager *mgr = platform_get_drvdata(pdev);
|
||||
|
||||
fpga_mgr_unregister(mgr);
|
||||
|
||||
return 0;
|
||||
return devm_fpga_mgr_register(dev, mgr);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_OF
|
||||
|
@ -601,7 +590,6 @@ MODULE_DEVICE_TABLE(of, socfpga_fpga_of_match);
|
|||
|
||||
static struct platform_driver socfpga_fpga_driver = {
|
||||
.probe = socfpga_fpga_probe,
|
||||
.remove = socfpga_fpga_remove,
|
||||
.driver = {
|
||||
.name = "socfpga_fpga_manager",
|
||||
.of_match_table = of_match_ptr(socfpga_fpga_of_match),
|
||||
|
|
|
@ -127,18 +127,7 @@ static int ts73xx_fpga_probe(struct platform_device *pdev)
|
|||
if (!mgr)
|
||||
return -ENOMEM;
|
||||
|
||||
platform_set_drvdata(pdev, mgr);
|
||||
|
||||
return fpga_mgr_register(mgr);
|
||||
}
|
||||
|
||||
static int ts73xx_fpga_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct fpga_manager *mgr = platform_get_drvdata(pdev);
|
||||
|
||||
fpga_mgr_unregister(mgr);
|
||||
|
||||
return 0;
|
||||
return devm_fpga_mgr_register(kdev, mgr);
|
||||
}
|
||||
|
||||
static struct platform_driver ts73xx_fpga_driver = {
|
||||
|
@ -146,7 +135,6 @@ static struct platform_driver ts73xx_fpga_driver = {
|
|||
.name = "ts73xx-fpga-mgr",
|
||||
},
|
||||
.probe = ts73xx_fpga_probe,
|
||||
.remove = ts73xx_fpga_remove,
|
||||
};
|
||||
module_platform_driver(ts73xx_fpga_driver);
|
||||
|
||||
|
|
|
@ -259,18 +259,7 @@ static int xilinx_spi_probe(struct spi_device *spi)
|
|||
if (!mgr)
|
||||
return -ENOMEM;
|
||||
|
||||
spi_set_drvdata(spi, mgr);
|
||||
|
||||
return fpga_mgr_register(mgr);
|
||||
}
|
||||
|
||||
static int xilinx_spi_remove(struct spi_device *spi)
|
||||
{
|
||||
struct fpga_manager *mgr = spi_get_drvdata(spi);
|
||||
|
||||
fpga_mgr_unregister(mgr);
|
||||
|
||||
return 0;
|
||||
return devm_fpga_mgr_register(&spi->dev, mgr);
|
||||
}
|
||||
|
||||
static const struct of_device_id xlnx_spi_of_match[] = {
|
||||
|
@ -285,7 +274,6 @@ static struct spi_driver xilinx_slave_spi_driver = {
|
|||
.of_match_table = of_match_ptr(xlnx_spi_of_match),
|
||||
},
|
||||
.probe = xilinx_spi_probe,
|
||||
.remove = xilinx_spi_remove,
|
||||
};
|
||||
|
||||
module_spi_driver(xilinx_slave_spi_driver)
|
||||
|
|
|
@ -95,7 +95,6 @@ static int zynqmp_fpga_probe(struct platform_device *pdev)
|
|||
struct device *dev = &pdev->dev;
|
||||
struct zynqmp_fpga_priv *priv;
|
||||
struct fpga_manager *mgr;
|
||||
int ret;
|
||||
|
||||
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
|
||||
if (!priv)
|
||||
|
@ -108,24 +107,7 @@ static int zynqmp_fpga_probe(struct platform_device *pdev)
|
|||
if (!mgr)
|
||||
return -ENOMEM;
|
||||
|
||||
platform_set_drvdata(pdev, mgr);
|
||||
|
||||
ret = fpga_mgr_register(mgr);
|
||||
if (ret) {
|
||||
dev_err(dev, "unable to register FPGA manager");
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int zynqmp_fpga_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct fpga_manager *mgr = platform_get_drvdata(pdev);
|
||||
|
||||
fpga_mgr_unregister(mgr);
|
||||
|
||||
return 0;
|
||||
return devm_fpga_mgr_register(dev, mgr);
|
||||
}
|
||||
|
||||
static const struct of_device_id zynqmp_fpga_of_match[] = {
|
||||
|
@ -137,7 +119,6 @@ MODULE_DEVICE_TABLE(of, zynqmp_fpga_of_match);
|
|||
|
||||
static struct platform_driver zynqmp_fpga_driver = {
|
||||
.probe = zynqmp_fpga_probe,
|
||||
.remove = zynqmp_fpga_remove,
|
||||
.driver = {
|
||||
.name = "zynqmp_fpga_manager",
|
||||
.of_match_table = of_match_ptr(zynqmp_fpga_of_match),
|
||||
|
|
|
@ -8,6 +8,7 @@
|
|||
#include <linux/io.h>
|
||||
#include <linux/mfd/syscon.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/regmap.h>
|
||||
|
@ -19,6 +20,7 @@
|
|||
|
||||
struct fsi_master_aspeed {
|
||||
struct fsi_master master;
|
||||
struct mutex lock; /* protect HW access */
|
||||
struct device *dev;
|
||||
void __iomem *base;
|
||||
struct clk *clk;
|
||||
|
@ -254,6 +256,8 @@ static int aspeed_master_read(struct fsi_master *master, int link,
|
|||
addr |= id << 21;
|
||||
addr += link * FSI_HUB_LINK_SIZE;
|
||||
|
||||
mutex_lock(&aspeed->lock);
|
||||
|
||||
switch (size) {
|
||||
case 1:
|
||||
ret = opb_readb(aspeed, fsi_base + addr, val);
|
||||
|
@ -265,14 +269,14 @@ static int aspeed_master_read(struct fsi_master *master, int link,
|
|||
ret = opb_readl(aspeed, fsi_base + addr, val);
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
ret = -EINVAL;
|
||||
goto done;
|
||||
}
|
||||
|
||||
ret = check_errors(aspeed, ret);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
done:
|
||||
mutex_unlock(&aspeed->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int aspeed_master_write(struct fsi_master *master, int link,
|
||||
|
@ -287,6 +291,8 @@ static int aspeed_master_write(struct fsi_master *master, int link,
|
|||
addr |= id << 21;
|
||||
addr += link * FSI_HUB_LINK_SIZE;
|
||||
|
||||
mutex_lock(&aspeed->lock);
|
||||
|
||||
switch (size) {
|
||||
case 1:
|
||||
ret = opb_writeb(aspeed, fsi_base + addr, *(u8 *)val);
|
||||
|
@ -298,14 +304,14 @@ static int aspeed_master_write(struct fsi_master *master, int link,
|
|||
ret = opb_writel(aspeed, fsi_base + addr, *(__be32 *)val);
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
ret = -EINVAL;
|
||||
goto done;
|
||||
}
|
||||
|
||||
ret = check_errors(aspeed, ret);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
done:
|
||||
mutex_unlock(&aspeed->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int aspeed_master_link_enable(struct fsi_master *master, int link,
|
||||
|
@ -320,17 +326,21 @@ static int aspeed_master_link_enable(struct fsi_master *master, int link,
|
|||
|
||||
reg = cpu_to_be32(0x80000000 >> bit);
|
||||
|
||||
if (!enable)
|
||||
return opb_writel(aspeed, ctrl_base + FSI_MCENP0 + (4 * idx),
|
||||
reg);
|
||||
mutex_lock(&aspeed->lock);
|
||||
|
||||
if (!enable) {
|
||||
ret = opb_writel(aspeed, ctrl_base + FSI_MCENP0 + (4 * idx), reg);
|
||||
goto done;
|
||||
}
|
||||
|
||||
ret = opb_writel(aspeed, ctrl_base + FSI_MSENP0 + (4 * idx), reg);
|
||||
if (ret)
|
||||
return ret;
|
||||
goto done;
|
||||
|
||||
mdelay(FSI_LINK_ENABLE_SETUP_TIME);
|
||||
|
||||
return 0;
|
||||
done:
|
||||
mutex_unlock(&aspeed->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int aspeed_master_term(struct fsi_master *master, int link, uint8_t id)
|
||||
|
@ -431,9 +441,11 @@ static ssize_t cfam_reset_store(struct device *dev, struct device_attribute *att
|
|||
{
|
||||
struct fsi_master_aspeed *aspeed = dev_get_drvdata(dev);
|
||||
|
||||
mutex_lock(&aspeed->lock);
|
||||
gpiod_set_value(aspeed->cfam_reset_gpio, 1);
|
||||
usleep_range(900, 1000);
|
||||
gpiod_set_value(aspeed->cfam_reset_gpio, 0);
|
||||
mutex_unlock(&aspeed->lock);
|
||||
|
||||
return count;
|
||||
}
|
||||
|
@ -597,6 +609,7 @@ static int fsi_master_aspeed_probe(struct platform_device *pdev)
|
|||
|
||||
dev_set_drvdata(&pdev->dev, aspeed);
|
||||
|
||||
mutex_init(&aspeed->lock);
|
||||
aspeed_master_init(aspeed);
|
||||
|
||||
rc = fsi_master_register(&aspeed->master);
|
||||
|
|
|
@ -110,6 +110,14 @@ config CORESIGHT_SOURCE_ETM4X
|
|||
To compile this driver as a module, choose M here: the
|
||||
module will be called coresight-etm4x.
|
||||
|
||||
config ETM4X_IMPDEF_FEATURE
|
||||
bool "Control implementation defined overflow support in ETM 4.x driver"
|
||||
depends on CORESIGHT_SOURCE_ETM4X
|
||||
help
|
||||
This control provides implementation define control for CoreSight
|
||||
ETM 4.x tracer module that can't reduce commit rate automatically.
|
||||
This avoids overflow between the ETM tracer module and the cpu core.
|
||||
|
||||
config CORESIGHT_STM
|
||||
tristate "CoreSight System Trace Macrocell driver"
|
||||
depends on (ARM && !(CPU_32v3 || CPU_32v4 || CPU_32v4T)) || ARM64
|
||||
|
|
|
@ -567,7 +567,7 @@ out:
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int __exit catu_remove(struct amba_device *adev)
|
||||
static int catu_remove(struct amba_device *adev)
|
||||
{
|
||||
struct catu_drvdata *drvdata = dev_get_drvdata(&adev->dev);
|
||||
|
||||
|
|
|
@ -418,7 +418,7 @@ static int coresight_enable_source(struct coresight_device *csdev, u32 mode)
|
|||
if (ret) {
|
||||
coresight_control_assoc_ectdev(csdev, false);
|
||||
return ret;
|
||||
};
|
||||
}
|
||||
}
|
||||
csdev->enable = true;
|
||||
}
|
||||
|
@ -432,7 +432,7 @@ static int coresight_enable_source(struct coresight_device *csdev, u32 mode)
|
|||
* coresight_disable_source - Drop the reference count by 1 and disable
|
||||
* the device if there are no users left.
|
||||
*
|
||||
* @csdev - The coresight device to disable
|
||||
* @csdev: The coresight device to disable
|
||||
*
|
||||
* Returns true if the device has been disabled.
|
||||
*/
|
||||
|
@ -663,6 +663,9 @@ struct coresight_device *coresight_get_sink_by_id(u32 id)
|
|||
/**
|
||||
* coresight_get_ref- Helper function to increase reference count to module
|
||||
* and device.
|
||||
*
|
||||
* @csdev: The coresight device to get a reference on.
|
||||
*
|
||||
* Return true in successful case and power up the device.
|
||||
* Return false when failed to get reference of module.
|
||||
*/
|
||||
|
@ -682,6 +685,8 @@ static inline bool coresight_get_ref(struct coresight_device *csdev)
|
|||
/**
|
||||
* coresight_put_ref- Helper function to decrease reference count to module
|
||||
* and device. Power off the device.
|
||||
*
|
||||
* @csdev: The coresight device to decrement a reference from.
|
||||
*/
|
||||
static inline void coresight_put_ref(struct coresight_device *csdev)
|
||||
{
|
||||
|
@ -744,6 +749,7 @@ static void coresight_drop_device(struct coresight_device *csdev)
|
|||
/**
|
||||
* _coresight_build_path - recursively build a path from a @csdev to a sink.
|
||||
* @csdev: The device to start from.
|
||||
* @sink: The final sink we want in this path.
|
||||
* @path: The list to add devices to.
|
||||
*
|
||||
* The tree of Coresight device is traversed until an activated sink is
|
||||
|
|
|
@ -836,7 +836,7 @@ static void cti_device_release(struct device *dev)
|
|||
if (drvdata->csdev_release)
|
||||
drvdata->csdev_release(dev);
|
||||
}
|
||||
static int __exit cti_remove(struct amba_device *adev)
|
||||
static int cti_remove(struct amba_device *adev)
|
||||
{
|
||||
struct cti_drvdata *drvdata = dev_get_drvdata(&adev->dev);
|
||||
|
||||
|
|
|
@ -176,6 +176,7 @@ static int etb_enable_perf(struct coresight_device *csdev, void *data)
|
|||
unsigned long flags;
|
||||
struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
|
||||
struct perf_output_handle *handle = data;
|
||||
struct cs_buffers *buf = etm_perf_sink_config(handle);
|
||||
|
||||
spin_lock_irqsave(&drvdata->spinlock, flags);
|
||||
|
||||
|
@ -186,7 +187,7 @@ static int etb_enable_perf(struct coresight_device *csdev, void *data)
|
|||
}
|
||||
|
||||
/* Get a handle on the pid of the process to monitor */
|
||||
pid = task_pid_nr(handle->event->owner);
|
||||
pid = buf->pid;
|
||||
|
||||
if (drvdata->pid != -1 && drvdata->pid != pid) {
|
||||
ret = -EBUSY;
|
||||
|
@ -383,6 +384,7 @@ static void *etb_alloc_buffer(struct coresight_device *csdev,
|
|||
if (!buf)
|
||||
return NULL;
|
||||
|
||||
buf->pid = task_pid_nr(event->owner);
|
||||
buf->snapshot = overwrite;
|
||||
buf->nr_pages = nr_pages;
|
||||
buf->data_pages = pages;
|
||||
|
@ -801,7 +803,7 @@ err_misc_register:
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int __exit etb_remove(struct amba_device *adev)
|
||||
static int etb_remove(struct amba_device *adev)
|
||||
{
|
||||
struct etb_drvdata *drvdata = dev_get_drvdata(&adev->dev);
|
||||
|
||||
|
|
|
@ -902,14 +902,14 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void __exit clear_etmdrvdata(void *info)
|
||||
static void clear_etmdrvdata(void *info)
|
||||
{
|
||||
int cpu = *(int *)info;
|
||||
|
||||
etmdrvdata[cpu] = NULL;
|
||||
}
|
||||
|
||||
static int __exit etm_remove(struct amba_device *adev)
|
||||
static int etm_remove(struct amba_device *adev)
|
||||
{
|
||||
struct etm_drvdata *drvdata = dev_get_drvdata(&adev->dev);
|
||||
|
||||
|
|
|
@ -3,6 +3,7 @@
|
|||
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include <linux/init.h>
|
||||
|
@ -28,7 +29,9 @@
|
|||
#include <linux/perf_event.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/property.h>
|
||||
|
||||
#include <asm/sections.h>
|
||||
#include <asm/sysreg.h>
|
||||
#include <asm/local.h>
|
||||
#include <asm/virt.h>
|
||||
|
||||
|
@ -103,6 +106,97 @@ struct etm4_enable_arg {
|
|||
int rc;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_ETM4X_IMPDEF_FEATURE
|
||||
|
||||
#define HISI_HIP08_AMBA_ID 0x000b6d01
|
||||
#define ETM4_AMBA_MASK 0xfffff
|
||||
#define HISI_HIP08_CORE_COMMIT_MASK 0x3000
|
||||
#define HISI_HIP08_CORE_COMMIT_SHIFT 12
|
||||
#define HISI_HIP08_CORE_COMMIT_FULL 0b00
|
||||
#define HISI_HIP08_CORE_COMMIT_LVL_1 0b01
|
||||
#define HISI_HIP08_CORE_COMMIT_REG sys_reg(3, 1, 15, 2, 5)
|
||||
|
||||
struct etm4_arch_features {
|
||||
void (*arch_callback)(bool enable);
|
||||
};
|
||||
|
||||
static bool etm4_hisi_match_pid(unsigned int id)
|
||||
{
|
||||
return (id & ETM4_AMBA_MASK) == HISI_HIP08_AMBA_ID;
|
||||
}
|
||||
|
||||
static void etm4_hisi_config_core_commit(bool enable)
|
||||
{
|
||||
u8 commit = enable ? HISI_HIP08_CORE_COMMIT_LVL_1 :
|
||||
HISI_HIP08_CORE_COMMIT_FULL;
|
||||
u64 val;
|
||||
|
||||
/*
|
||||
* bit 12 and 13 of HISI_HIP08_CORE_COMMIT_REG are used together
|
||||
* to set core-commit, 2'b00 means cpu is at full speed, 2'b01,
|
||||
* 2'b10, 2'b11 mean reduce pipeline speed, and 2'b01 means level-1
|
||||
* speed(minimun value). So bit 12 and 13 should be cleared together.
|
||||
*/
|
||||
val = read_sysreg_s(HISI_HIP08_CORE_COMMIT_REG);
|
||||
val &= ~HISI_HIP08_CORE_COMMIT_MASK;
|
||||
val |= commit << HISI_HIP08_CORE_COMMIT_SHIFT;
|
||||
write_sysreg_s(val, HISI_HIP08_CORE_COMMIT_REG);
|
||||
}
|
||||
|
||||
static struct etm4_arch_features etm4_features[] = {
|
||||
[ETM4_IMPDEF_HISI_CORE_COMMIT] = {
|
||||
.arch_callback = etm4_hisi_config_core_commit,
|
||||
},
|
||||
{},
|
||||
};
|
||||
|
||||
static void etm4_enable_arch_specific(struct etmv4_drvdata *drvdata)
|
||||
{
|
||||
struct etm4_arch_features *ftr;
|
||||
int bit;
|
||||
|
||||
for_each_set_bit(bit, drvdata->arch_features, ETM4_IMPDEF_FEATURE_MAX) {
|
||||
ftr = &etm4_features[bit];
|
||||
|
||||
if (ftr->arch_callback)
|
||||
ftr->arch_callback(true);
|
||||
}
|
||||
}
|
||||
|
||||
static void etm4_disable_arch_specific(struct etmv4_drvdata *drvdata)
|
||||
{
|
||||
struct etm4_arch_features *ftr;
|
||||
int bit;
|
||||
|
||||
for_each_set_bit(bit, drvdata->arch_features, ETM4_IMPDEF_FEATURE_MAX) {
|
||||
ftr = &etm4_features[bit];
|
||||
|
||||
if (ftr->arch_callback)
|
||||
ftr->arch_callback(false);
|
||||
}
|
||||
}
|
||||
|
||||
static void etm4_check_arch_features(struct etmv4_drvdata *drvdata,
|
||||
unsigned int id)
|
||||
{
|
||||
if (etm4_hisi_match_pid(id))
|
||||
set_bit(ETM4_IMPDEF_HISI_CORE_COMMIT, drvdata->arch_features);
}
#else
static void etm4_enable_arch_specific(struct etmv4_drvdata *drvdata)
{
}

static void etm4_disable_arch_specific(struct etmv4_drvdata *drvdata)
{
}

static void etm4_check_arch_features(struct etmv4_drvdata *drvdata,
unsigned int id)
{
}
#endif /* CONFIG_ETM4X_IMPDEF_FEATURE */

static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
{
int i, rc;
@@ -110,6 +204,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
struct device *etm_dev = &drvdata->csdev->dev;

CS_UNLOCK(drvdata->base);
etm4_enable_arch_specific(drvdata);

etm4_os_unlock(drvdata);

@@ -124,8 +219,8 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1))
dev_err(etm_dev,
"timeout while waiting for Idle Trace Status\n");

writel_relaxed(config->pe_sel, drvdata->base + TRCPROCSELR);
if (drvdata->nr_pe)
writel_relaxed(config->pe_sel, drvdata->base + TRCPROCSELR);
writel_relaxed(config->cfg, drvdata->base + TRCCONFIGR);
/* nothing specific implemented */
writel_relaxed(0x0, drvdata->base + TRCAUXCTLR);
@@ -141,8 +236,9 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
writel_relaxed(config->viiectlr, drvdata->base + TRCVIIECTLR);
writel_relaxed(config->vissctlr,
drvdata->base + TRCVISSCTLR);
writel_relaxed(config->vipcssctlr,
drvdata->base + TRCVIPCSSCTLR);
if (drvdata->nr_pe_cmp)
writel_relaxed(config->vipcssctlr,
drvdata->base + TRCVIPCSSCTLR);
for (i = 0; i < drvdata->nrseqstate - 1; i++)
writel_relaxed(config->seq_ctrl[i],
drvdata->base + TRCSEQEVRn(i));
@@ -187,13 +283,15 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
writeq_relaxed(config->ctxid_pid[i],
drvdata->base + TRCCIDCVRn(i));
writel_relaxed(config->ctxid_mask0, drvdata->base + TRCCIDCCTLR0);
writel_relaxed(config->ctxid_mask1, drvdata->base + TRCCIDCCTLR1);
if (drvdata->numcidc > 4)
writel_relaxed(config->ctxid_mask1, drvdata->base + TRCCIDCCTLR1);

for (i = 0; i < drvdata->numvmidc; i++)
writeq_relaxed(config->vmid_val[i],
drvdata->base + TRCVMIDCVRn(i));
writel_relaxed(config->vmid_mask0, drvdata->base + TRCVMIDCCTLR0);
writel_relaxed(config->vmid_mask1, drvdata->base + TRCVMIDCCTLR1);
if (drvdata->numvmidc > 4)
writel_relaxed(config->vmid_mask1, drvdata->base + TRCVMIDCCTLR1);

if (!drvdata->skip_power_up) {
/*
@@ -476,6 +574,7 @@ static void etm4_disable_hw(void *info)
int i;

CS_UNLOCK(drvdata->base);
etm4_disable_arch_specific(drvdata);

if (!drvdata->skip_power_up) {
/* power can be removed from the trace unit now */
@@ -722,8 +821,13 @@ static void etm4_init_arch_data(void *info)
else
drvdata->sysstall = false;

/* NUMPROC, bits[30:28] the number of PEs available for tracing */
drvdata->nr_pe = BMVAL(etmidr3, 28, 30);
/*
* NUMPROC - the number of PEs available for tracing, 5bits
* = TRCIDR3.bits[13:12]bits[30:28]
* bits[4:3] = TRCIDR3.bits[13:12] (since etm-v4.2, otherwise RES0)
* bits[3:0] = TRCIDR3.bits[30:28]
*/
drvdata->nr_pe = (BMVAL(etmidr3, 12, 13) << 3) | BMVAL(etmidr3, 28, 30);

/* NOOVERFLOW, bit[31] is trace overflow prevention supported */
if (BMVAL(etmidr3, 31, 31))
@@ -779,7 +883,7 @@ static void etm4_init_arch_data(void *info)
* LPOVERRIDE, bit[23] implementation supports
* low-power state override
*/
if (BMVAL(etmidr5, 23, 23))
if (BMVAL(etmidr5, 23, 23) && (!drvdata->skip_power_up))
drvdata->lpoverride = true;
else
drvdata->lpoverride = false;
@@ -1178,7 +1282,8 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
state = drvdata->save_state;

state->trcprgctlr = readl(drvdata->base + TRCPRGCTLR);
state->trcprocselr = readl(drvdata->base + TRCPROCSELR);
if (drvdata->nr_pe)
state->trcprocselr = readl(drvdata->base + TRCPROCSELR);
state->trcconfigr = readl(drvdata->base + TRCCONFIGR);
state->trcauxctlr = readl(drvdata->base + TRCAUXCTLR);
state->trceventctl0r = readl(drvdata->base + TRCEVENTCTL0R);
@@ -1194,7 +1299,8 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
state->trcvictlr = readl(drvdata->base + TRCVICTLR);
state->trcviiectlr = readl(drvdata->base + TRCVIIECTLR);
state->trcvissctlr = readl(drvdata->base + TRCVISSCTLR);
state->trcvipcssctlr = readl(drvdata->base + TRCVIPCSSCTLR);
if (drvdata->nr_pe_cmp)
state->trcvipcssctlr = readl(drvdata->base + TRCVIPCSSCTLR);
state->trcvdctlr = readl(drvdata->base + TRCVDCTLR);
state->trcvdsacctlr = readl(drvdata->base + TRCVDSACCTLR);
state->trcvdarcctlr = readl(drvdata->base + TRCVDARCCTLR);
@@ -1240,10 +1346,12 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
state->trcvmidcvr[i] = readq(drvdata->base + TRCVMIDCVRn(i));

state->trccidcctlr0 = readl(drvdata->base + TRCCIDCCTLR0);
state->trccidcctlr1 = readl(drvdata->base + TRCCIDCCTLR1);
if (drvdata->numcidc > 4)
state->trccidcctlr1 = readl(drvdata->base + TRCCIDCCTLR1);

state->trcvmidcctlr0 = readl(drvdata->base + TRCVMIDCCTLR0);
state->trcvmidcctlr1 = readl(drvdata->base + TRCVMIDCCTLR1);
if (drvdata->numvmidc > 4)
state->trcvmidcctlr1 = readl(drvdata->base + TRCVMIDCCTLR1);

state->trcclaimset = readl(drvdata->base + TRCCLAIMCLR);

@@ -1283,7 +1391,8 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
writel_relaxed(state->trcclaimset, drvdata->base + TRCCLAIMSET);

writel_relaxed(state->trcprgctlr, drvdata->base + TRCPRGCTLR);
writel_relaxed(state->trcprocselr, drvdata->base + TRCPROCSELR);
if (drvdata->nr_pe)
writel_relaxed(state->trcprocselr, drvdata->base + TRCPROCSELR);
writel_relaxed(state->trcconfigr, drvdata->base + TRCCONFIGR);
writel_relaxed(state->trcauxctlr, drvdata->base + TRCAUXCTLR);
writel_relaxed(state->trceventctl0r, drvdata->base + TRCEVENTCTL0R);
@@ -1299,7 +1408,8 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
writel_relaxed(state->trcvictlr, drvdata->base + TRCVICTLR);
writel_relaxed(state->trcviiectlr, drvdata->base + TRCVIIECTLR);
writel_relaxed(state->trcvissctlr, drvdata->base + TRCVISSCTLR);
writel_relaxed(state->trcvipcssctlr, drvdata->base + TRCVIPCSSCTLR);
if (drvdata->nr_pe_cmp)
writel_relaxed(state->trcvipcssctlr, drvdata->base + TRCVIPCSSCTLR);
writel_relaxed(state->trcvdctlr, drvdata->base + TRCVDCTLR);
writel_relaxed(state->trcvdsacctlr, drvdata->base + TRCVDSACCTLR);
writel_relaxed(state->trcvdarcctlr, drvdata->base + TRCVDARCCTLR);
@@ -1350,10 +1460,12 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
drvdata->base + TRCVMIDCVRn(i));

writel_relaxed(state->trccidcctlr0, drvdata->base + TRCCIDCCTLR0);
writel_relaxed(state->trccidcctlr1, drvdata->base + TRCCIDCCTLR1);
if (drvdata->numcidc > 4)
writel_relaxed(state->trccidcctlr1, drvdata->base + TRCCIDCCTLR1);

writel_relaxed(state->trcvmidcctlr0, drvdata->base + TRCVMIDCCTLR0);
writel_relaxed(state->trcvmidcctlr1, drvdata->base + TRCVMIDCCTLR1);
if (drvdata->numvmidc > 4)
writel_relaxed(state->trcvmidcctlr1, drvdata->base + TRCVMIDCCTLR1);

writel_relaxed(state->trcclaimset, drvdata->base + TRCCLAIMSET);

@@ -1547,6 +1659,8 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
drvdata->boot_enable = true;
}

etm4_check_arch_features(drvdata, id->id);

return 0;
}

@@ -1559,14 +1673,14 @@ static struct amba_cs_uci_id uci_id_etm4[] = {
}
};

static void __exit clear_etmdrvdata(void *info)
static void clear_etmdrvdata(void *info)
{
int cpu = *(int *)info;

etmdrvdata[cpu] = NULL;
}

static int __exit etm4_remove(struct amba_device *adev)
static int etm4_remove(struct amba_device *adev)
{
struct etmv4_drvdata *drvdata = dev_get_drvdata(&adev->dev);

@@ -8,6 +8,7 @@

#include <asm/local.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include "coresight-priv.h"

/*
@@ -203,6 +204,11 @@
/* Interpretation of resource numbers change at ETM v4.3 architecture */
#define ETM4X_ARCH_4V3 0x43

enum etm_impdef_type {
ETM4_IMPDEF_HISI_CORE_COMMIT,
ETM4_IMPDEF_FEATURE_MAX,
};

/**
* struct etmv4_config - configuration information related to an ETMv4
* @mode: Controls various modes supported by this ETM.
@@ -415,6 +421,7 @@ struct etmv4_save_state {
* @state_needs_restore: True when there is context to restore after PM exit
* @skip_power_up: Indicates if an implementation can skip powering up
* the trace unit.
* @arch_features: Bitmap of arch features of etmv4 devices.
*/
struct etmv4_drvdata {
void __iomem *base;
@@ -463,6 +470,7 @@ struct etmv4_drvdata {
struct etmv4_save_state *save_state;
bool state_needs_restore;
bool skip_power_up;
DECLARE_BITMAP(arch_features, ETM4_IMPDEF_FEATURE_MAX);
};

/* Address comparator access types */

@@ -274,7 +274,7 @@ out_disable_clk:
return ret;
}

static int __exit funnel_remove(struct device *dev)
static int funnel_remove(struct device *dev)
{
struct funnel_drvdata *drvdata = dev_get_drvdata(dev);

@@ -328,7 +328,7 @@ static int static_funnel_probe(struct platform_device *pdev)
return ret;
}

static int __exit static_funnel_remove(struct platform_device *pdev)
static int static_funnel_remove(struct platform_device *pdev)
{
funnel_remove(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -356,7 +356,7 @@ static struct platform_driver static_funnel_driver = {
.remove = static_funnel_remove,
.driver = {
.name = "coresight-static-funnel",
.owner = THIS_MODULE,
/* THIS_MODULE is taken care of by platform_driver_register() */
.of_match_table = static_funnel_match,
.acpi_match_table = ACPI_PTR(static_funnel_ids),
.pm = &funnel_dev_pm_ops,
@@ -370,7 +370,7 @@ static int dynamic_funnel_probe(struct amba_device *adev,
return funnel_probe(&adev->dev, &adev->res);
}

static int __exit dynamic_funnel_remove(struct amba_device *adev)
static int dynamic_funnel_remove(struct amba_device *adev)
{
return funnel_remove(&adev->dev);
}

@@ -87,6 +87,7 @@ enum cs_mode {
* struct cs_buffer - keep track of a recording session' specifics
* @cur: index of the current buffer
* @nr_pages: max number of pages granted to us
* @pid: PID this cs_buffer belongs to
* @offset: offset within the current buffer
* @data_size: how much we collected in this run
* @snapshot: is this run in snapshot mode
@@ -95,6 +96,7 @@ enum cs_mode {
struct cs_buffers {
unsigned int cur;
unsigned int nr_pages;
pid_t pid;
unsigned long offset;
local_t data_size;
bool snapshot;

@@ -291,7 +291,7 @@ out_disable_clk:
return ret;
}

static int __exit replicator_remove(struct device *dev)
static int replicator_remove(struct device *dev)
{
struct replicator_drvdata *drvdata = dev_get_drvdata(dev);

@@ -318,7 +318,7 @@ static int static_replicator_probe(struct platform_device *pdev)
return ret;
}

static int __exit static_replicator_remove(struct platform_device *pdev)
static int static_replicator_remove(struct platform_device *pdev)
{
replicator_remove(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -374,7 +374,7 @@ static struct platform_driver static_replicator_driver = {
.remove = static_replicator_remove,
.driver = {
.name = "coresight-static-replicator",
.owner = THIS_MODULE,
/* THIS_MODULE is taken care of by platform_driver_register() */
.of_match_table = of_match_ptr(static_replicator_match),
.acpi_match_table = ACPI_PTR(static_replicator_acpi_ids),
.pm = &replicator_dev_pm_ops,
@@ -388,7 +388,7 @@ static int dynamic_replicator_probe(struct amba_device *adev,
return replicator_probe(&adev->dev, &adev->res);
}

static int __exit dynamic_replicator_remove(struct amba_device *adev)
static int dynamic_replicator_remove(struct amba_device *adev)
{
return replicator_remove(&adev->dev);
}

@@ -96,7 +96,7 @@ module_param_named(
boot_nr_channel, boot_nr_channel, int, S_IRUGO
);

/**
/*
* struct channel_space - central management entity for extended ports
* @base: memory mapped base address where channels start.
* @phys: physical base address of channel region.
@@ -951,7 +951,7 @@ stm_unregister:
return ret;
}

static int __exit stm_remove(struct amba_device *adev)
static int stm_remove(struct amba_device *adev)
{
struct stm_drvdata *drvdata = dev_get_drvdata(&adev->dev);

@@ -559,7 +559,7 @@ out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
}

static int __exit tmc_remove(struct amba_device *adev)
static int tmc_remove(struct amba_device *adev)
{
struct tmc_drvdata *drvdata = dev_get_drvdata(&adev->dev);

@@ -227,6 +227,7 @@ static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
struct perf_output_handle *handle = data;
struct cs_buffers *buf = etm_perf_sink_config(handle);

spin_lock_irqsave(&drvdata->spinlock, flags);
do {
@@ -243,7 +244,7 @@ static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
}

/* Get a handle on the pid of the process to monitor */
pid = task_pid_nr(handle->event->owner);
pid = buf->pid;

if (drvdata->pid != -1 && drvdata->pid != pid) {
ret = -EBUSY;
@@ -399,6 +400,7 @@ static void *tmc_alloc_etf_buffer(struct coresight_device *csdev,
if (!buf)
return NULL;

buf->pid = task_pid_nr(event->owner);
buf->snapshot = overwrite;
buf->nr_pages = nr_pages;
buf->data_pages = pages;

@@ -217,6 +217,8 @@ static int tmc_pages_alloc(struct tmc_pages *tmc_pages,
} else {
page = alloc_pages_node(node,
GFP_KERNEL | __GFP_ZERO, 0);
if (!page)
goto err;
}
paddr = dma_map_page(real_dev, page, 0, PAGE_SIZE, dir);
if (dma_mapping_error(real_dev, paddr))
@@ -954,11 +956,11 @@ static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata)
dev_dbg(&drvdata->csdev->dev,
"tmc memory error detected, truncating buffer\n");
etr_buf->len = 0;
etr_buf->full = 0;
etr_buf->full = false;
return;
}

etr_buf->full = status & TMC_STS_FULL;
etr_buf->full = !!(status & TMC_STS_FULL);

WARN_ON(!etr_buf->ops || !etr_buf->ops->sync);

@@ -1550,7 +1552,7 @@ tmc_update_etr_buffer(struct coresight_device *csdev,

/* Insert barrier packets at the beginning, if there was an overflow */
if (lost)
tmc_etr_buf_insert_barrier_packet(etr_buf, etr_buf->offset);
tmc_etr_buf_insert_barrier_packet(etr_buf, offset);
tmc_etr_sync_perf_buffer(etr_perf, offset, size);

/*

@@ -49,7 +49,7 @@

DEFINE_CORESIGHT_DEVLIST(tpiu_devs, "tpiu");

/**
/*
* @base: memory mapped base address for this component.
* @atclk: optional clock for the core parts of the TPIU.
* @csdev: component vitals needed by the framework.
@@ -173,7 +173,7 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
return PTR_ERR(drvdata->csdev);
}

static int __exit tpiu_remove(struct amba_device *adev)
static int tpiu_remove(struct amba_device *adev)
{
struct tpiu_drvdata *drvdata = dev_get_drvdata(&adev->dev);

@@ -13,5 +13,6 @@ if INTERCONNECT

source "drivers/interconnect/imx/Kconfig"
source "drivers/interconnect/qcom/Kconfig"
source "drivers/interconnect/samsung/Kconfig"

endif

@@ -6,3 +6,4 @@ icc-core-objs := core.o bulk.o
obj-$(CONFIG_INTERCONNECT) += icc-core.o
obj-$(CONFIG_INTERCONNECT_IMX) += imx/
obj-$(CONFIG_INTERCONNECT_QCOM) += qcom/
obj-$(CONFIG_INTERCONNECT_SAMSUNG) += samsung/

@@ -41,17 +41,10 @@ struct bcm_voter {

static int cmp_vcd(void *priv, struct list_head *a, struct list_head *b)
{
const struct qcom_icc_bcm *bcm_a =
list_entry(a, struct qcom_icc_bcm, list);
const struct qcom_icc_bcm *bcm_b =
list_entry(b, struct qcom_icc_bcm, list);
const struct qcom_icc_bcm *bcm_a = list_entry(a, struct qcom_icc_bcm, list);
const struct qcom_icc_bcm *bcm_b = list_entry(b, struct qcom_icc_bcm, list);

if (bcm_a->aux_data.vcd < bcm_b->aux_data.vcd)
return -1;
else if (bcm_a->aux_data.vcd == bcm_b->aux_data.vcd)
return 0;
else
return 1;
return bcm_a->aux_data.vcd - bcm_b->aux_data.vcd;
}

static u64 bcm_div(u64 num, u32 base)

@@ -0,0 +1,13 @@
# SPDX-License-Identifier: GPL-2.0-only
config INTERCONNECT_SAMSUNG
bool "Samsung SoC interconnect drivers"
depends on ARCH_EXYNOS || COMPILE_TEST
help
Interconnect drivers for Samsung SoCs.

config INTERCONNECT_EXYNOS
tristate "Exynos generic interconnect driver"
depends on INTERCONNECT_SAMSUNG
default y if ARCH_EXYNOS
help
Generic interconnect driver for Exynos SoCs.

@@ -0,0 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
exynos-interconnect-objs := exynos.o

obj-$(CONFIG_INTERCONNECT_EXYNOS) += exynos-interconnect.o

@@ -0,0 +1,199 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Exynos generic interconnect provider driver
*
* Copyright (c) 2020 Samsung Electronics Co., Ltd.
*
* Authors: Artur Świgoń <a.swigon@samsung.com>
* Sylwester Nawrocki <s.nawrocki@samsung.com>
*/
#include <linux/device.h>
#include <linux/interconnect-provider.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>

#define EXYNOS_ICC_DEFAULT_BUS_CLK_RATIO 8

struct exynos_icc_priv {
struct device *dev;

/* One interconnect node per provider */
struct icc_provider provider;
struct icc_node *node;

struct dev_pm_qos_request qos_req;
u32 bus_clk_ratio;
};

static struct icc_node *exynos_icc_get_parent(struct device_node *np)
{
struct of_phandle_args args;
struct icc_node_data *icc_node_data;
struct icc_node *icc_node;
int num, ret;

num = of_count_phandle_with_args(np, "interconnects",
"#interconnect-cells");
if (num < 1)
return NULL; /* parent nodes are optional */

/* Get the interconnect target node */
ret = of_parse_phandle_with_args(np, "interconnects",
"#interconnect-cells", 0, &args);
if (ret < 0)
return ERR_PTR(ret);

icc_node_data = of_icc_get_from_provider(&args);
of_node_put(args.np);

if (IS_ERR(icc_node_data))
return ERR_CAST(icc_node_data);

icc_node = icc_node_data->node;
kfree(icc_node_data);

return icc_node;
}

static int exynos_generic_icc_set(struct icc_node *src, struct icc_node *dst)
{
struct exynos_icc_priv *src_priv = src->data, *dst_priv = dst->data;
s32 src_freq = max(src->avg_bw, src->peak_bw) / src_priv->bus_clk_ratio;
s32 dst_freq = max(dst->avg_bw, dst->peak_bw) / dst_priv->bus_clk_ratio;
int ret;

ret = dev_pm_qos_update_request(&src_priv->qos_req, src_freq);
if (ret < 0) {
dev_err(src_priv->dev, "failed to update PM QoS of %s (src)\n",
src->name);
return ret;
}

ret = dev_pm_qos_update_request(&dst_priv->qos_req, dst_freq);
if (ret < 0) {
dev_err(dst_priv->dev, "failed to update PM QoS of %s (dst)\n",
dst->name);
return ret;
}

return 0;
}

static struct icc_node *exynos_generic_icc_xlate(struct of_phandle_args *spec,
void *data)
{
struct exynos_icc_priv *priv = data;

if (spec->np != priv->dev->parent->of_node)
return ERR_PTR(-EINVAL);

return priv->node;
}

static int exynos_generic_icc_remove(struct platform_device *pdev)
{
struct exynos_icc_priv *priv = platform_get_drvdata(pdev);
struct icc_node *parent_node, *node = priv->node;

parent_node = exynos_icc_get_parent(priv->dev->parent->of_node);
if (parent_node && !IS_ERR(parent_node))
icc_link_destroy(node, parent_node);

icc_nodes_remove(&priv->provider);
icc_provider_del(&priv->provider);

return 0;
}

static int exynos_generic_icc_probe(struct platform_device *pdev)
{
struct device *bus_dev = pdev->dev.parent;
struct exynos_icc_priv *priv;
struct icc_provider *provider;
struct icc_node *icc_node, *icc_parent_node;
int ret;

priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;

priv->dev = &pdev->dev;
platform_set_drvdata(pdev, priv);

provider = &priv->provider;

provider->set = exynos_generic_icc_set;
provider->aggregate = icc_std_aggregate;
provider->xlate = exynos_generic_icc_xlate;
provider->dev = bus_dev;
provider->inter_set = true;
provider->data = priv;

ret = icc_provider_add(provider);
if (ret < 0)
return ret;

icc_node = icc_node_create(pdev->id);
if (IS_ERR(icc_node)) {
ret = PTR_ERR(icc_node);
goto err_prov_del;
}

priv->node = icc_node;
icc_node->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn",
bus_dev->of_node);
if (of_property_read_u32(bus_dev->of_node, "samsung,data-clock-ratio",
&priv->bus_clk_ratio))
priv->bus_clk_ratio = EXYNOS_ICC_DEFAULT_BUS_CLK_RATIO;

/*
* Register a PM QoS request for the parent (devfreq) device.
*/
ret = dev_pm_qos_add_request(bus_dev, &priv->qos_req,
DEV_PM_QOS_MIN_FREQUENCY, 0);
if (ret < 0)
goto err_node_del;

icc_node->data = priv;
icc_node_add(icc_node, provider);

icc_parent_node = exynos_icc_get_parent(bus_dev->of_node);
if (IS_ERR(icc_parent_node)) {
ret = PTR_ERR(icc_parent_node);
goto err_pmqos_del;
}
if (icc_parent_node) {
ret = icc_link_create(icc_node, icc_parent_node->id);
if (ret < 0)
goto err_pmqos_del;
}

return 0;

err_pmqos_del:
dev_pm_qos_remove_request(&priv->qos_req);
err_node_del:
icc_nodes_remove(provider);
err_prov_del:
icc_provider_del(provider);
return ret;
}

static struct platform_driver exynos_generic_icc_driver = {
.driver = {
.name = "exynos-generic-icc",
.sync_state = icc_sync_state,
},
.probe = exynos_generic_icc_probe,
.remove = exynos_generic_icc_remove,
};
module_platform_driver(exynos_generic_icc_driver);

MODULE_DESCRIPTION("Exynos generic interconnect driver");
MODULE_AUTHOR("Artur Świgoń <a.swigon@samsung.com>");
MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:exynos-generic-icc");

@@ -544,7 +544,6 @@ static void ipoctal_set_termios(struct tty_struct *tty,
break;
default:
return;
break;
}

baud = tty_get_baud_rate(tty);

@@ -2265,11 +2265,6 @@ static int altera_check_crc(u8 *p, s32 program_size)
"actual %04x\n", __func__, local_expected,
local_actual);
break;
case -ENODATA:
printk(KERN_ERR "%s: expected CRC not found, "
"actual CRC = %04x\n", __func__,
local_actual);
break;
case -EIO:
printk(KERN_ERR "%s: error: format isn't "
"recognized.\n", __func__);

@@ -899,7 +899,7 @@ struct c2port_device *c2port_device_register(char *name,
unlikely(!ops->c2d_get) || unlikely(!ops->c2d_set))
return ERR_PTR(-EINVAL);

c2dev = kmalloc(sizeof(struct c2port_device), GFP_KERNEL);
c2dev = kzalloc(sizeof(struct c2port_device), GFP_KERNEL);
if (unlikely(!c2dev))
return ERR_PTR(-ENOMEM);

@@ -73,6 +73,9 @@ static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)

pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);

pcr->rtd3_en = rtsx_reg_to_rtd3_uhsii(reg);

if (rtsx_check_mmc_support(reg))
pcr->extra_caps |= EXTRA_CAPS_NO_MMC;
pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
@@ -278,15 +281,28 @@ static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)

rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);

if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) {
if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A))
rtsx_pci_write_register(pcr, REG_VREF, PWD_SUSPND_EN, PWD_SUSPND_EN);
rtsx_pci_write_register(pcr, RTS524A_PM_CTRL3, 0x01, 0x00);
rtsx_pci_write_register(pcr, RTS524A_PME_FORCE_CTL, 0x30, 0x20);

if (pcr->rtd3_en) {
if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) {
rtsx_pci_write_register(pcr, RTS524A_PM_CTRL3, 0x01, 0x01);
rtsx_pci_write_register(pcr, RTS524A_PME_FORCE_CTL, 0x30, 0x30);
} else {
rtsx_pci_write_register(pcr, PM_CTRL3, 0x01, 0x01);
rtsx_pci_write_register(pcr, PME_FORCE_CTL, 0xFF, 0x33);
}
} else {
rtsx_pci_write_register(pcr, PME_FORCE_CTL, 0xFF, 0x30);
rtsx_pci_write_register(pcr, PM_CTRL3, 0x01, 0x00);
if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) {
rtsx_pci_write_register(pcr, RTS524A_PM_CTRL3, 0x01, 0x00);
rtsx_pci_write_register(pcr, RTS524A_PME_FORCE_CTL, 0x30, 0x20);
} else {
rtsx_pci_write_register(pcr, PME_FORCE_CTL, 0xFF, 0x30);
rtsx_pci_write_register(pcr, PM_CTRL3, 0x01, 0x00);
}
}

/*
* If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
* to drive low, and we forcibly request clock.

@@ -20,6 +20,8 @@
#include <linux/rtsx_pci.h>
#include <linux/mmc/card.h>
#include <asm/unaligned.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

#include "rtsx_pcr.h"
#include "rts5261.h"
@@ -89,9 +91,15 @@ static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
if (pcr->aspm_enabled == enable)
return;

pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_ASPMC,
enable ? pcr->aspm_en : 0);
if (pcr->aspm_en & 0x02)
rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
FORCE_ASPM_CTL1, enable ? 0 : FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
else
rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
FORCE_ASPM_CTL1, FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);

if (!enable && (pcr->aspm_en & 0x02))
mdelay(10);

pcr->aspm_enabled = enable;
}
@@ -144,6 +152,12 @@ void rtsx_pci_start_run(struct rtsx_pcr *pcr)
if (pcr->remove_pci)
return;

if (pcr->rtd3_en)
if (pcr->is_runtime_suspended) {
pm_runtime_get(&(pcr->pci->dev));
pcr->is_runtime_suspended = false;
}

if (pcr->state != PDEV_STAT_RUN) {
pcr->state = PDEV_STAT_RUN;
if (pcr->ops->enable_auto_blink)
@@ -1075,6 +1089,16 @@ static void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
rtsx_comm_pm_power_saving(pcr);
}

static void rtsx_pci_rtd3_work(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, rtd3_work);

pcr_dbg(pcr, "--> %s\n", __func__);
if (!pcr->is_runtime_suspended)
pm_runtime_put(&(pcr->pci->dev));
}

static void rtsx_pci_idle_work(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
@@ -1094,6 +1118,9 @@ static void rtsx_pci_idle_work(struct work_struct *work)
rtsx_pm_power_saving(pcr);

mutex_unlock(&pcr->pcr_mutex);

if (pcr->rtd3_en)
mod_delayed_work(system_wq, &pcr->rtd3_work, msecs_to_jiffies(10000));
}

static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
@@ -1283,7 +1310,7 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
/* Wait SSC power stable */
udelay(200);

rtsx_pci_disable_aspm(pcr);
rtsx_disable_aspm(pcr);
if (pcr->ops->optimize_phy) {
err = pcr->ops->optimize_phy(pcr);
if (err < 0)
@@ -1357,8 +1384,8 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
rtsx_pci_init_ocp(pcr);

/* Enable clk_request_n to enable clock power management */
pcie_capability_write_word(pdev, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_CLKREQ_EN);
pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
0, PCI_EXP_LNKCTL_CLKREQ_EN);
/* Enter L1 when host tx idle */
pci_write_config_byte(pdev, 0x70F, 0x5B);

@@ -1368,6 +1395,8 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
return err;
}

rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);

/* No CD interrupt if probing driver with card inserted.
* So we need to initialize pcr->card_exist here.
*/
@@ -1571,6 +1600,15 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
rtsx_pcr_cells[i].platform_data = handle;
rtsx_pcr_cells[i].pdata_size = sizeof(*handle);
}

if (pcr->rtd3_en) {
INIT_DELAYED_WORK(&pcr->rtd3_work, rtsx_pci_rtd3_work);
pm_runtime_allow(&pcidev->dev);
pm_runtime_enable(&pcidev->dev);
pcr->is_runtime_suspended = false;
}

ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
if (ret < 0)
@@ -1608,6 +1646,9 @@ static void rtsx_pci_remove(struct pci_dev *pcidev)
struct pcr_handle *handle = pci_get_drvdata(pcidev);
struct rtsx_pcr *pcr = handle->pcr;

if (pcr->rtd3_en)
pm_runtime_get_noresume(&pcr->pci->dev);

pcr->remove_pci = true;

/* Disable interrupts at the pcr level */
@@ -1618,6 +1659,8 @@ static void rtsx_pci_remove(struct pci_dev *pcidev)

cancel_delayed_work_sync(&pcr->carddet_work);
cancel_delayed_work_sync(&pcr->idle_work);
if (pcr->rtd3_en)
cancel_delayed_work_sync(&pcr->rtd3_work);

mfd_remove_devices(&pcidev->dev);

@@ -1635,6 +1678,11 @@ static void rtsx_pci_remove(struct pci_dev *pcidev)
idr_remove(&rtsx_pci_idr, pcr->id);
spin_unlock(&rtsx_pci_lock);

if (pcr->rtd3_en) {
pm_runtime_disable(&pcr->pci->dev);
pm_runtime_put_noidle(&pcr->pci->dev);
}

kfree(pcr->slots);
kfree(pcr);
kfree(handle);
@@ -1716,13 +1764,77 @@ static void rtsx_pci_shutdown(struct pci_dev *pcidev)
pci_disable_msi(pcr->pci);
}

static int rtsx_pci_runtime_suspend(struct device *device)
{
struct pci_dev *pcidev = to_pci_dev(device);
struct pcr_handle *handle;
struct rtsx_pcr *pcr;

handle = pci_get_drvdata(pcidev);
pcr = handle->pcr;
dev_dbg(&(pcidev->dev), "--> %s\n", __func__);

cancel_delayed_work(&pcr->carddet_work);
cancel_delayed_work(&pcr->rtd3_work);
cancel_delayed_work(&pcr->idle_work);

mutex_lock(&pcr->pcr_mutex);
rtsx_pci_power_off(pcr, HOST_ENTER_S3);

free_irq(pcr->irq, (void *)pcr);

mutex_unlock(&pcr->pcr_mutex);

pcr->is_runtime_suspended = true;

return 0;
}

static int rtsx_pci_runtime_resume(struct device *device)
{
struct pci_dev *pcidev = to_pci_dev(device);
struct pcr_handle *handle;
struct rtsx_pcr *pcr;
int ret = 0;

handle = pci_get_drvdata(pcidev);
pcr = handle->pcr;
dev_dbg(&(pcidev->dev), "--> %s\n", __func__);

mutex_lock(&pcr->pcr_mutex);

rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
rtsx_pci_acquire_irq(pcr);
synchronize_irq(pcr->irq);

if (pcr->ops->fetch_vendor_settings)
pcr->ops->fetch_vendor_settings(pcr);

rtsx_pci_init_hw(pcr);

if (pcr->slots[RTSX_SD_CARD].p_dev != NULL) {
pcr->slots[RTSX_SD_CARD].card_event(
pcr->slots[RTSX_SD_CARD].p_dev);
}

schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));

mutex_unlock(&pcr->pcr_mutex);
return ret;
}

#else /* CONFIG_PM */

#define rtsx_pci_shutdown NULL
#define rtsx_pci_runtime_suspend NULL
#define rtsx_pic_runtime_resume NULL

#endif /* CONFIG_PM */

static SIMPLE_DEV_PM_OPS(rtsx_pci_pm_ops, rtsx_pci_suspend, rtsx_pci_resume);
static const struct dev_pm_ops rtsx_pci_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(rtsx_pci_suspend, rtsx_pci_resume)
SET_RUNTIME_PM_OPS(rtsx_pci_runtime_suspend, rtsx_pci_runtime_resume, NULL)
};

static struct pci_driver rtsx_pci_driver = {
.name = DRV_NAME_RTSX_PCI,

@@ -90,6 +90,7 @@ static inline u8 map_sd_drive(int idx)

#define rtsx_check_mmc_support(reg) ((reg) & 0x10)
#define rtsx_reg_to_rtd3(reg) ((reg) & 0x02)
#define rtsx_reg_to_rtd3_uhsii(reg) ((reg) & 0x04)
#define rtsx_reg_to_aspm(reg) (((reg) >> 28) & 0x03)
#define rtsx_reg_to_sd30_drive_sel_1v8(reg) (((reg) >> 26) & 0x03)
#define rtsx_reg_to_sd30_drive_sel_3v3(reg) (((reg) >> 5) & 0x03)

@@ -22,6 +22,9 @@
* mean that some AT25 products are EEPROMs, and others are FLASH.
* Handle FLASH chips with the drivers/mtd/devices/m25p80.c driver,
* not this one!
*
* EEPROMs that can be used with this driver include, for example:
* AT25M02, AT25128B
*/

struct at25_data {