Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Conflicts:
  tools/testing/selftests/net/config
    62199e3f16 ("selftests: net: Add VXLAN MDB test")
    3a0385be13 ("selftests: add the missing CONFIG_IP_SCTP in net config")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

commit 800e68c44f

 .mailmap | 2
@@ -265,7 +265,9 @@ Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski@samsung.com>
Krzysztof Kozlowski <krzk@kernel.org> <krzysztof.kozlowski@canonical.com>
Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
Kuogee Hsieh <quic_khsieh@quicinc.com> <khsieh@codeaurora.org>
Leonard Crestez <leonard.crestez@nxp.com> Leonard Crestez <cdleonard@gmail.com>
Leonardo Bras <leobras.c@gmail.com> <leonardo@linux.ibm.com>
Leonard Göhrs <l.goehrs@pengutronix.de>
Leonid I Ananiev <leonid.i.ananiev@intel.com>
Leon Romanovsky <leon@kernel.org> <leon@leon.nu>
Leon Romanovsky <leon@kernel.org> <leonro@mellanox.com>

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/interrupt-controller/loongarch,cpu-interrupt-controller.yaml#
$id: http://devicetree.org/schemas/interrupt-controller/loongson,cpu-interrupt-controller.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: LoongArch CPU Interrupt Controller

@@ -11,7 +11,7 @@ maintainers:

properties:
  compatible:
    const: loongarch,cpu-interrupt-controller
    const: loongson,cpu-interrupt-controller

  '#interrupt-cells':
    const: 1

@@ -28,7 +28,7 @@ required:

examples:
  - |
    interrupt-controller {
      compatible = "loongarch,cpu-interrupt-controller";
      compatible = "loongson,cpu-interrupt-controller";
      #interrupt-cells = <1>;
      interrupt-controller;
    };

@@ -92,7 +92,7 @@ properties:
- description: Error interrupt
- description: Receive buffer full interrupt
- description: Transmit buffer empty interrupt
- description: Transmit End interrupt
- description: Break interrupt
- items:
  - description: Error interrupt
  - description: Receive buffer full interrupt

@@ -107,7 +107,7 @@ properties:
- const: eri
- const: rxi
- const: txi
- const: tei
- const: bri
- items:
  - const: eri
  - const: rxi

@@ -39,13 +39,12 @@ With CONFIG_ZSMALLOC_STAT, we could see zsmalloc internal information via

# cat /sys/kernel/debug/zsmalloc/zram0/classes

class size almost_full almost_empty obj_allocated obj_used pages_used pages_per_zspage
class size 10% 20% 30% 40% 50% 60% 70% 80% 90% 99% 100% obj_allocated obj_used pages_used pages_per_zspage freeable
...
9 176 0 1 186 129 8 4
10 192 1 0 2880 2872 135 3
11 208 0 1 819 795 42 2
12 224 0 1 219 159 12 4
30 512 0 12 4 1 0 1 0 0 1 0 414 3464 3346 433 1 14
31 528 2 7 2 2 1 0 1 0 0 2 117 4154 3793 536 4 44
32 544 6 3 4 1 2 1 0 0 0 1 260 4170 3965 556 2 26
...

@@ -54,10 +53,28 @@ class
  index
size
  object size zspage stores
almost_empty
  the number of ZS_ALMOST_EMPTY zspages(see below)
almost_full
  the number of ZS_ALMOST_FULL zspages(see below)
10%
  the number of zspages with usage ratio less than 10% (see below)
20%
  the number of zspages with usage ratio between 10% and 20%
30%
  the number of zspages with usage ratio between 20% and 30%
40%
  the number of zspages with usage ratio between 30% and 40%
50%
  the number of zspages with usage ratio between 40% and 50%
60%
  the number of zspages with usage ratio between 50% and 60%
70%
  the number of zspages with usage ratio between 60% and 70%
80%
  the number of zspages with usage ratio between 70% and 80%
90%
  the number of zspages with usage ratio between 80% and 90%
99%
  the number of zspages with usage ratio between 90% and 99%
100%
  the number of zspages with usage ratio 100%
obj_allocated
  the number of objects allocated
obj_used

@@ -66,19 +83,14 @@ pages_used
  the number of pages allocated for the class
pages_per_zspage
  the number of 0-order pages to make a zspage
freeable
  the approximate number of pages class compaction can free

We assign a zspage to ZS_ALMOST_EMPTY fullness group when n <= N / f, where

* n = number of allocated objects
* N = total number of objects zspage can store
* f = fullness_threshold_frac(ie, 4 at the moment)

Similarly, we assign zspage to:

* ZS_ALMOST_FULL when n > N / f
* ZS_EMPTY when n == 0
* ZS_FULL when n == N

Each zspage maintains inuse counter which keeps track of the number of
objects stored in the zspage. The inuse counter determines the zspage's
"fullness group" which is calculated as the ratio of the "inuse" objects to
the total number of objects the zspage can hold (objs_per_zspage). The
closer the inuse counter is to objs_per_zspage, the better.
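
A minimal sketch of how such an inuse/objs_per_zspage ratio maps onto the
percentage buckets listed earlier (illustrative pseudo-helper only, with a
made-up name; this is not the actual zsmalloc code)::

	static unsigned int zspage_usage_percent(unsigned int inuse,
						 unsigned int objs_per_zspage)
	{
		/* ratio of stored objects to zspage capacity, 0..100 */
		return inuse * 100 / objs_per_zspage;
	}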

Internals
=========

@@ -94,10 +106,10 @@ of objects that each zspage can store.

For instance, consider the following size classes:::

class size almost_full almost_empty obj_allocated obj_used pages_used pages_per_zspage freeable
class size 10% .... 100% obj_allocated obj_used pages_used pages_per_zspage freeable
...
94 1536 0 0 0 0 0 3 0
100 1632 0 0 0 0 0 2 0
94 1536 0 .... 0 0 0 0 3 0
100 1632 0 .... 0 0 0 0 2 0
...

@@ -134,10 +146,11 @@ reduces memory wastage.

Let's take a closer look at the bottom of `/sys/kernel/debug/zsmalloc/zramX/classes`:::

class size almost_full almost_empty obj_allocated obj_used pages_used pages_per_zspage freeable
class size 10% .... 100% obj_allocated obj_used pages_used pages_per_zspage freeable

...
202 3264 0 0 0 0 0 4 0
254 4096 0 0 0 0 0 1 0
202 3264 0 .. 0 0 0 0 4 0
254 4096 0 .. 0 0 0 0 1 0
...

Size class #202 stores objects of size 3264 bytes and has a maximum of 4 pages
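
(As a quick sanity check of those numbers, assuming 4 KiB pages: a 4-page
zspage is 4 * 4096 = 16384 bytes, which holds five 3264-byte objects and
leaves only 16384 - 5 * 3264 = 64 bytes unused.)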

@@ -151,40 +164,42 @@ efficient storage of large objects.

For zspage chain size of 8, huge class watermark becomes 3632 bytes:::

class size almost_full almost_empty obj_allocated obj_used pages_used pages_per_zspage freeable
class size 10% .... 100% obj_allocated obj_used pages_used pages_per_zspage freeable

...
202 3264 0 0 0 0 0 4 0
211 3408 0 0 0 0 0 5 0
217 3504 0 0 0 0 0 6 0
222 3584 0 0 0 0 0 7 0
225 3632 0 0 0 0 0 8 0
254 4096 0 0 0 0 0 1 0
202 3264 0 .. 0 0 0 0 4 0
211 3408 0 .. 0 0 0 0 5 0
217 3504 0 .. 0 0 0 0 6 0
222 3584 0 .. 0 0 0 0 7 0
225 3632 0 .. 0 0 0 0 8 0
254 4096 0 .. 0 0 0 0 1 0
...

For zspage chain size of 16, huge class watermark becomes 3840 bytes:::

class size almost_full almost_empty obj_allocated obj_used pages_used pages_per_zspage freeable
class size 10% .... 100% obj_allocated obj_used pages_used pages_per_zspage freeable

...
202 3264 0 0 0 0 0 4 0
206 3328 0 0 0 0 0 13 0
207 3344 0 0 0 0 0 9 0
208 3360 0 0 0 0 0 14 0
211 3408 0 0 0 0 0 5 0
212 3424 0 0 0 0 0 16 0
214 3456 0 0 0 0 0 11 0
217 3504 0 0 0 0 0 6 0
219 3536 0 0 0 0 0 13 0
222 3584 0 0 0 0 0 7 0
223 3600 0 0 0 0 0 15 0
225 3632 0 0 0 0 0 8 0
228 3680 0 0 0 0 0 9 0
230 3712 0 0 0 0 0 10 0
232 3744 0 0 0 0 0 11 0
234 3776 0 0 0 0 0 12 0
235 3792 0 0 0 0 0 13 0
236 3808 0 0 0 0 0 14 0
238 3840 0 0 0 0 0 15 0
254 4096 0 0 0 0 0 1 0
202 3264 0 .. 0 0 0 0 4 0
206 3328 0 .. 0 0 0 0 13 0
207 3344 0 .. 0 0 0 0 9 0
208 3360 0 .. 0 0 0 0 14 0
211 3408 0 .. 0 0 0 0 5 0
212 3424 0 .. 0 0 0 0 16 0
214 3456 0 .. 0 0 0 0 11 0
217 3504 0 .. 0 0 0 0 6 0
219 3536 0 .. 0 0 0 0 13 0
222 3584 0 .. 0 0 0 0 7 0
223 3600 0 .. 0 0 0 0 15 0
225 3632 0 .. 0 0 0 0 8 0
228 3680 0 .. 0 0 0 0 9 0
230 3712 0 .. 0 0 0 0 10 0
232 3744 0 .. 0 0 0 0 11 0
234 3776 0 .. 0 0 0 0 12 0
235 3792 0 .. 0 0 0 0 13 0
236 3808 0 .. 0 0 0 0 14 0
238 3840 0 .. 0 0 0 0 15 0
254 4096 0 .. 0 0 0 0 1 0
...

Overall the combined zspage chain size effect on zsmalloc pool configuration:::

@@ -214,9 +229,10 @@ zram as a build artifacts storage (Linux kernel compilation).

zsmalloc classes stats:::

class size almost_full almost_empty obj_allocated obj_used pages_used pages_per_zspage freeable
class size 10% .... 100% obj_allocated obj_used pages_used pages_per_zspage freeable

...
Total 13 51 413836 412973 159955 3
Total 13 .. 51 413836 412973 159955 3

zram mm_stat:::

@@ -227,9 +243,10 @@ zram as a build artifacts storage (Linux kernel compilation).

zsmalloc classes stats:::

class size almost_full almost_empty obj_allocated obj_used pages_used pages_per_zspage freeable
class size 10% .... 100% obj_allocated obj_used pages_used pages_per_zspage freeable

...
Total 18 87 414852 412978 156666 0
Total 18 .. 87 414852 412978 156666 0

zram mm_stat:::

@@ -340,6 +340,8 @@ tcp_app_win - INTEGER
  Reserve max(window/2^tcp_app_win, mss) of window for application
  buffer. Value 0 is special, it means that nothing is reserved.

  Possible values are [0, 31], inclusive.

  Default: 31

tcp_autocorking - BOOLEAN
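
For reference, a minimal sketch of the reservation formula quoted above
(illustrative only, with a made-up helper name; not the kernel's
implementation)::

	static unsigned int app_win_reserve(unsigned int window,
					    unsigned int mss,
					    unsigned int tcp_app_win)
	{
		unsigned int reserve;

		if (tcp_app_win == 0)
			return 0;	/* 0 is special: nothing is reserved */
		reserve = window >> tcp_app_win;	/* window / 2^tcp_app_win */
		return reserve > mss ? reserve : mss;	/* max(reserve, mss) */
	}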
 MAINTAINERS | 16

@@ -224,13 +224,13 @@ S: Orphan / Obsolete
F: drivers/net/ethernet/8390/

9P FILE SYSTEM
M: Eric Van Hensbergen <ericvh@gmail.com>
M: Eric Van Hensbergen <ericvh@kernel.org>
M: Latchesar Ionkov <lucho@ionkov.net>
M: Dominique Martinet <asmadeus@codewreck.org>
R: Christian Schoenebeck <linux_oss@crudebyte.com>
L: v9fs-developer@lists.sourceforge.net
L: v9fs@lists.linux.dev
S: Maintained
W: http://swik.net/v9fs
W: http://github.com/v9fs
Q: http://patchwork.kernel.org/project/v9fs-devel/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ericvh/v9fs.git
T: git git://github.com/martinetd/linux.git

@@ -4468,14 +4468,14 @@ F: Documentation/devicetree/bindings/net/ieee802154/ca8210.txt
F: drivers/net/ieee802154/ca8210.c

CANAAN/KENDRYTE K210 SOC FPIOA DRIVER
M: Damien Le Moal <damien.lemoal@wdc.com>
M: Damien Le Moal <dlemoal@kernel.org>
L: linux-riscv@lists.infradead.org
L: linux-gpio@vger.kernel.org (pinctrl driver)
F: Documentation/devicetree/bindings/pinctrl/canaan,k210-fpioa.yaml
F: drivers/pinctrl/pinctrl-k210.c

CANAAN/KENDRYTE K210 SOC RESET CONTROLLER DRIVER
M: Damien Le Moal <damien.lemoal@wdc.com>
M: Damien Le Moal <dlemoal@kernel.org>
L: linux-kernel@vger.kernel.org
L: linux-riscv@lists.infradead.org
S: Maintained

@@ -4483,7 +4483,7 @@ F: Documentation/devicetree/bindings/reset/canaan,k210-rst.yaml
F: drivers/reset/reset-k210.c

CANAAN/KENDRYTE K210 SOC SYSTEM CONTROLLER DRIVER
M: Damien Le Moal <damien.lemoal@wdc.com>
M: Damien Le Moal <dlemoal@kernel.org>
L: linux-riscv@lists.infradead.org
S: Maintained
F: Documentation/devicetree/bindings/mfd/canaan,k210-sysctl.yaml

@@ -11765,7 +11765,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
F: drivers/ata/sata_promise.*

LIBATA SUBSYSTEM (Serial and Parallel ATA drivers)
M: Damien Le Moal <damien.lemoal@opensource.wdc.com>
M: Damien Le Moal <dlemoal@kernel.org>
L: linux-ide@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/dlemoal/libata.git

@@ -23139,7 +23139,7 @@ S: Maintained
F: arch/x86/kernel/cpu/zhaoxin.c

ZONEFS FILESYSTEM
M: Damien Le Moal <damien.lemoal@opensource.wdc.com>
M: Damien Le Moal <dlemoal@kernel.org>
M: Naohiro Aota <naohiro.aota@wdc.com>
R: Johannes Thumshirn <jth@kernel.org>
L: linux-fsdevel@vger.kernel.org
 Makefile | 2

@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 3
SUBLEVEL = 0
EXTRAVERSION = -rc5
EXTRAVERSION = -rc6
NAME = Hurr durr I'ma ninja sloth

# *DOCUMENTATION*

@@ -314,36 +314,32 @@ int do_compat_alignment_fixup(unsigned long addr, struct pt_regs *regs)
int (*handler)(unsigned long addr, u32 instr, struct pt_regs *regs);
unsigned int type;
u32 instr = 0;
u16 tinstr = 0;
int isize = 4;
int thumb2_32b = 0;
int fault;

instrptr = instruction_pointer(regs);

if (compat_thumb_mode(regs)) {
__le16 __user *ptr = (__le16 __user *)(instrptr & ~1);
u16 tinstr, tinst2;

fault = alignment_get_thumb(regs, ptr, &tinstr);
if (!fault) {
if (IS_T32(tinstr)) {
/* Thumb-2 32-bit */
u16 tinst2;
fault = alignment_get_thumb(regs, ptr + 1, &tinst2);
instr = ((u32)tinstr << 16) | tinst2;
thumb2_32b = 1;
} else {
isize = 2;
instr = thumb2arm(tinstr);
}
if (alignment_get_thumb(regs, ptr, &tinstr))
return 1;

if (IS_T32(tinstr)) { /* Thumb-2 32-bit */
if (alignment_get_thumb(regs, ptr + 1, &tinst2))
return 1;
instr = ((u32)tinstr << 16) | tinst2;
thumb2_32b = 1;
} else {
isize = 2;
instr = thumb2arm(tinstr);
}
} else {
fault = alignment_get_arm(regs, (__le32 __user *)instrptr, &instr);
if (alignment_get_arm(regs, (__le32 __user *)instrptr, &instr))
return 1;
}

if (fault)
return 1;

switch (CODING_BITS(instr)) {
case 0x00000000: /* 3.13.4 load/store instruction extensions */
if (LDSTHD_I_BIT(instr))

@@ -1890,9 +1890,33 @@ static int __init do_pkvm_init(u32 hyp_va_bits)
return ret;
}

static u64 get_hyp_id_aa64pfr0_el1(void)
{
/*
* Track whether the system isn't affected by spectre/meltdown in the
* hypervisor's view of id_aa64pfr0_el1, used for protected VMs.
* Although this is per-CPU, we make it global for simplicity, e.g., not
* to have to worry about vcpu migration.
*
* Unlike for non-protected VMs, userspace cannot override this for
* protected VMs.
*/
u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));

val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED);
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
arm64_get_meltdown_state() == SPECTRE_UNAFFECTED);

return val;
}

static void kvm_hyp_init_symbols(void)
{
kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = get_hyp_id_aa64pfr0_el1();
kvm_nvhe_sym(id_aa64pfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
kvm_nvhe_sym(id_aa64isar0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1);
kvm_nvhe_sym(id_aa64isar1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);

@@ -33,11 +33,14 @@
* Allow for protected VMs:
* - Floating-point and Advanced SIMD
* - Data Independent Timing
* - Spectre/Meltdown Mitigation
*/
#define PVM_ID_AA64PFR0_ALLOW (\
ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP) | \
ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD) | \
ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) \
ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) | \
ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) | \
ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3) \
)

/*

@@ -85,19 +85,12 @@ static u64 get_restricted_features_unsigned(u64 sys_reg_val,

static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
{
const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm);
u64 set_mask = 0;
u64 allow_mask = PVM_ID_AA64PFR0_ALLOW;

set_mask |= get_restricted_features_unsigned(id_aa64pfr0_el1_sys_val,
PVM_ID_AA64PFR0_RESTRICT_UNSIGNED);

/* Spectre and Meltdown mitigation in KVM */
set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
(u64)kvm->arch.pfr0_csv2);
set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
(u64)kvm->arch.pfr0_csv3);

return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask;
}

@@ -558,6 +558,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
for_each_set_bit(i, &mask, 32)
kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
}
kvm_vcpu_pmu_restore_guest(vcpu);
}

static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)

@@ -794,7 +794,6 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
if (!kvm_supports_32bit_el0())
val |= ARMV8_PMU_PMCR_LC;
kvm_pmu_handle_pmcr(vcpu, val);
kvm_vcpu_pmu_restore_guest(vcpu);
} else {
/* PMCR.P & PMCR.C are RAZ */
val = __vcpu_sys_reg(vcpu, PMCR_EL0)

@@ -281,4 +281,8 @@
/* DMB */
#define A64_DMB_ISH aarch64_insn_gen_dmb(AARCH64_INSN_MB_ISH)

/* ADR */
#define A64_ADR(Rd, offset) \
aarch64_insn_gen_adr(0, offset, Rd, AARCH64_INSN_ADR_TYPE_ADR)

#endif /* _BPF_JIT_H */

@@ -1900,7 +1900,8 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
restore_args(ctx, args_off, nargs);
/* call original func */
emit(A64_LDR64I(A64_R(10), A64_SP, retaddr_off), ctx);
emit(A64_BLR(A64_R(10)), ctx);
emit(A64_ADR(A64_LR, AARCH64_INSN_SIZE * 2), ctx);
emit(A64_RET(A64_R(10)), ctx);
/* store return value */
emit(A64_STR64I(A64_R(0), A64_SP, retval_off), ctx);
/* reserve a nop for bpf_tramp_image_put */

@@ -1022,6 +1022,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
emit_atomic(insn, ctx);
break;

/* Speculation barrier */
case BPF_ST | BPF_NOSPEC:
break;

default:
pr_err("bpf_jit: unknown opcode %02x\n", code);
return -EINVAL;

@@ -3,9 +3,14 @@ core-y += arch/x86/crypto/

#
# Disable SSE and other FP/SIMD instructions to match normal x86
# This is required to work around issues in older LLVM versions, but breaks
# GCC versions < 11. See:
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99652
#
ifeq ($(CONFIG_CC_IS_CLANG),y)
KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx
KBUILD_RUSTFLAGS += -Ctarget-feature=-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-avx,-avx2
endif

ifeq ($(CONFIG_X86_32),y)
START := 0x8048000

@@ -125,6 +125,8 @@

#define INTEL_FAM6_LUNARLAKE_M 0xBD

#define INTEL_FAM6_ARROWLAKE 0xC6

/* "Small Core" Processors (Atom/E-Core) */

#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */

@@ -146,7 +146,11 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)

pr_debug("Local APIC address 0x%08x\n", madt->address);
}
if (madt->header.revision >= 5)

/* ACPI 6.3 and newer support the online capable bit. */
if (acpi_gbl_FADT.header.revision > 6 ||
(acpi_gbl_FADT.header.revision == 6 &&
acpi_gbl_FADT.minor_revision >= 3))
acpi_support_online_capable = true;

default_acpi_madt_oem_check(madt->header.oem_id,

@@ -193,7 +197,8 @@ static bool __init acpi_is_processor_usable(u32 lapic_flags)
if (lapic_flags & ACPI_MADT_ENABLED)
return true;

if (acpi_support_online_capable && (lapic_flags & ACPI_MADT_ONLINE_CAPABLE))
if (!acpi_support_online_capable ||
(lapic_flags & ACPI_MADT_ONLINE_CAPABLE))
return true;

return false;

@@ -7,6 +7,7 @@
#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <asm/amd_nb.h>
#include <asm/hpet.h>
#include <asm/pci_x86.h>

@@ -824,3 +825,23 @@ static void rs690_fix_64bit_dma(struct pci_dev *pdev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7910, rs690_fix_64bit_dma);

#endif

#ifdef CONFIG_AMD_NB

#define AMD_15B8_RCC_DEV2_EPF0_STRAP2 0x10136008
#define AMD_15B8_RCC_DEV2_EPF0_STRAP2_NO_SOFT_RESET_DEV2_F0_MASK 0x00000080L

static void quirk_clear_strap_no_soft_reset_dev2_f0(struct pci_dev *dev)
{
u32 data;

if (!amd_smn_read(0, AMD_15B8_RCC_DEV2_EPF0_STRAP2, &data)) {
data &= ~AMD_15B8_RCC_DEV2_EPF0_STRAP2_NO_SOFT_RESET_DEV2_F0_MASK;
if (amd_smn_write(0, AMD_15B8_RCC_DEV2_EPF0_STRAP2, data))
pci_err(dev, "Failed to write data 0x%x\n", data);
} else {
pci_err(dev, "Failed to read data\n");
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15b8, quirk_clear_strap_no_soft_reset_dev2_f0);
#endif

@@ -1359,8 +1359,6 @@ bool blk_rq_is_poll(struct request *rq)
return false;
if (rq->mq_hctx->type != HCTX_TYPE_POLL)
return false;
if (WARN_ON_ONCE(!rq->bio))
return false;
return true;
}
EXPORT_SYMBOL_GPL(blk_rq_is_poll);

@@ -1368,7 +1366,7 @@ EXPORT_SYMBOL_GPL(blk_rq_is_poll);
static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
{
do {
bio_poll(rq->bio, NULL, 0);
blk_mq_poll(rq->q, blk_rq_to_qc(rq), NULL, 0);
cond_resched();
} while (!completion_done(wait));
}

@@ -368,7 +368,6 @@ int disk_scan_partitions(struct gendisk *disk, fmode_t mode)
if (disk->open_partitions)
return -EBUSY;

set_bit(GD_NEED_PART_SCAN, &disk->state);
/*
* If the device is opened exclusively by current thread already, it's
* safe to scan partitons, otherwise, use bd_prepare_to_claim() to

@@ -381,12 +380,19 @@ int disk_scan_partitions(struct gendisk *disk, fmode_t mode)
return ret;
}

set_bit(GD_NEED_PART_SCAN, &disk->state);
bdev = blkdev_get_by_dev(disk_devt(disk), mode & ~FMODE_EXCL, NULL);
if (IS_ERR(bdev))
ret = PTR_ERR(bdev);
else
blkdev_put(bdev, mode & ~FMODE_EXCL);

/*
* If blkdev_get_by_dev() failed early, GD_NEED_PART_SCAN is still set,
* and this will cause that re-assemble partitioned raid device will
* creat partition for underlying disk.
*/
clear_bit(GD_NEED_PART_SCAN, &disk->state);
if (!(mode & FMODE_EXCL))
bd_abort_claiming(disk->part0, disk_scan_partitions);
return ret;

@@ -1984,6 +1984,7 @@ static int instance;
static int acpi_video_bus_add(struct acpi_device *device)
{
struct acpi_video_bus *video;
bool auto_detect;
int error;
acpi_status status;

@@ -2045,10 +2046,20 @@ static int acpi_video_bus_add(struct acpi_device *device)
mutex_unlock(&video_list_lock);

/*
* The userspace visible backlight_device gets registered separately
* from acpi_video_register_backlight().
* If backlight-type auto-detection is used then a native backlight may
* show up later and this may change the result from video to native.
* Therefor normally the userspace visible /sys/class/backlight device
* gets registered separately by the GPU driver calling
* acpi_video_register_backlight() when an internal panel is detected.
* Register the backlight now when not using auto-detection, so that
* when the kernel cmdline or DMI-quirks are used the backlight will
* get registered even if acpi_video_register_backlight() is not called.
*/
acpi_video_run_bcl_for_osi(video);
if (__acpi_video_get_backlight_type(false, &auto_detect) == acpi_backlight_video &&
!auto_detect)
acpi_video_bus_register_backlight(video);

acpi_video_bus_add_notify_handler(video);

return 0;

@@ -276,6 +276,43 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
},

/*
* Models which need acpi_video backlight control where the GPU drivers
* do not call acpi_video_register_backlight() because no internal panel
* is detected. Typically these are all-in-ones (monitors with builtin
* PC) where the panel connection shows up as regular DP instead of eDP.
*/
{
.callback = video_detect_force_video,
/* Apple iMac14,1 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "iMac14,1"),
},
},
{
.callback = video_detect_force_video,
/* Apple iMac14,2 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "iMac14,2"),
},
},

/*
* Older models with nvidia GPU which need acpi_video backlight
* control and where the old nvidia binary driver series does not
* call acpi_video_register_backlight().
*/
{
.callback = video_detect_force_video,
/* ThinkPad W530 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W530"),
},
},

/*
* These models have a working acpi_video backlight control, and using
* native backlight causes a regression where backlight does not work

@@ -782,7 +819,7 @@ static bool prefer_native_over_acpi_video(void)
* Determine which type of backlight interface to use on this system,
* First check cmdline, then dmi quirks, then do autodetect.
*/
static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
enum acpi_backlight_type __acpi_video_get_backlight_type(bool native, bool *auto_detect)
{
static DEFINE_MUTEX(init_mutex);
static bool nvidia_wmi_ec_present;

@@ -807,6 +844,9 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
native_available = true;
mutex_unlock(&init_mutex);

if (auto_detect)
*auto_detect = false;

/*
* The below heuristics / detection steps are in order of descending
* presedence. The commandline takes presedence over anything else.

@@ -818,6 +858,9 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
if (acpi_backlight_dmi != acpi_backlight_undef)
return acpi_backlight_dmi;

if (auto_detect)
*auto_detect = true;

/* Special cases such as nvidia_wmi_ec and apple gmux. */
if (nvidia_wmi_ec_present)
return acpi_backlight_nvidia_wmi_ec;

@@ -837,15 +880,4 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
/* No ACPI video/native (old hw), use vendor specific fw methods. */
return acpi_backlight_vendor;
}

enum acpi_backlight_type acpi_video_get_backlight_type(void)
{
return __acpi_video_get_backlight_type(false);
}
EXPORT_SYMBOL(acpi_video_get_backlight_type);

bool acpi_video_backlight_use_native(void)
{
return __acpi_video_get_backlight_type(true) == acpi_backlight_native;
}
EXPORT_SYMBOL(acpi_video_backlight_use_native);
EXPORT_SYMBOL(__acpi_video_get_backlight_type);

@@ -246,7 +246,7 @@ static int ublk_validate_params(const struct ublk_device *ub)
if (ub->params.types & UBLK_PARAM_TYPE_BASIC) {
const struct ublk_param_basic *p = &ub->params.basic;

if (p->logical_bs_shift > PAGE_SHIFT)
if (p->logical_bs_shift > PAGE_SHIFT || p->logical_bs_shift < 9)
return -EINVAL;

if (p->logical_bs_shift > p->physical_bs_shift)

@@ -1261,9 +1261,10 @@ static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
ublk_queue_cmd(ubq, req);
}

static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
unsigned int issue_flags,
struct ublksrv_io_cmd *ub_cmd)
{
struct ublksrv_io_cmd *ub_cmd = (struct ublksrv_io_cmd *)cmd->cmd;
struct ublk_device *ub = cmd->file->private_data;
struct ublk_queue *ubq;
struct ublk_io *io;

@@ -1362,6 +1363,23 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
return -EIOCBQUEUED;
}

static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
struct ublksrv_io_cmd *ub_src = (struct ublksrv_io_cmd *) cmd->cmd;
struct ublksrv_io_cmd ub_cmd;

/*
* Not necessary for async retry, but let's keep it simple and always
* copy the values to avoid any potential reuse.
*/
ub_cmd.q_id = READ_ONCE(ub_src->q_id);
ub_cmd.tag = READ_ONCE(ub_src->tag);
ub_cmd.result = READ_ONCE(ub_src->result);
ub_cmd.addr = READ_ONCE(ub_src->addr);

return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
}

static const struct file_operations ublk_ch_fops = {
.owner = THIS_MODULE,
.open = ublk_ch_open,

@@ -1952,6 +1970,8 @@ static int ublk_ctrl_set_params(struct ublk_device *ub,
/* clear all we don't support yet */
ub->params.types &= UBLK_PARAM_TYPE_ALL;
ret = ublk_validate_params(ub);
if (ret)
ub->params.types = 0;
}
mutex_unlock(&ub->mutex);

@@ -96,16 +96,14 @@ struct virtblk_req {

/*
* The zone append command has an extended in header.
* The status field in zone_append_in_hdr must have
* the same offset in virtblk_req as the non-zoned
* status field above.
* The status field in zone_append_in_hdr must always
* be the last byte.
*/
struct {
__virtio64 sector;
u8 status;
u8 reserved[7];
__le64 append_sector;
} zone_append_in_hdr;
};
} zone_append;
} in_hdr;

size_t in_hdr_len;

@@ -154,7 +152,7 @@ static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
sgs[num_out + num_in++] = vbr->sg_table.sgl;
}

sg_init_one(&in_hdr, &vbr->status, vbr->in_hdr_len);
sg_init_one(&in_hdr, &vbr->in_hdr.status, vbr->in_hdr_len);
sgs[num_out + num_in++] = &in_hdr;

return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);

@@ -242,11 +240,14 @@ static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
struct request *req,
struct virtblk_req *vbr)
{
size_t in_hdr_len = sizeof(vbr->status);
size_t in_hdr_len = sizeof(vbr->in_hdr.status);
bool unmap = false;
u32 type;
u64 sector = 0;

if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) && op_is_zone_mgmt(req_op(req)))
return BLK_STS_NOTSUPP;

/* Set fields for all request types */
vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req));

@@ -287,7 +288,7 @@ static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
case REQ_OP_ZONE_APPEND:
type = VIRTIO_BLK_T_ZONE_APPEND;
sector = blk_rq_pos(req);
in_hdr_len = sizeof(vbr->zone_append_in_hdr);
in_hdr_len = sizeof(vbr->in_hdr.zone_append);
break;
case REQ_OP_ZONE_RESET:
type = VIRTIO_BLK_T_ZONE_RESET;

@@ -297,7 +298,10 @@ static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
type = VIRTIO_BLK_T_ZONE_RESET_ALL;
break;
case REQ_OP_DRV_IN:
/* Out header already filled in, nothing to do */
/*
* Out header has already been prepared by the caller (virtblk_get_id()
* or virtblk_submit_zone_report()), nothing to do here.
*/
return 0;
default:
WARN_ON_ONCE(1);

@@ -318,16 +322,28 @@ static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
return 0;
}

/*
* The status byte is always the last byte of the virtblk request
* in-header. This helper fetches its value for all in-header formats
* that are currently defined.
*/
static inline u8 virtblk_vbr_status(struct virtblk_req *vbr)
{
return *((u8 *)&vbr->in_hdr + vbr->in_hdr_len - 1);
}

static inline void virtblk_request_done(struct request *req)
{
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
blk_status_t status = virtblk_result(vbr->status);
blk_status_t status = virtblk_result(virtblk_vbr_status(vbr));
struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;

virtblk_unmap_data(req, vbr);
virtblk_cleanup_cmd(req);

if (req_op(req) == REQ_OP_ZONE_APPEND)
req->__sector = le64_to_cpu(vbr->zone_append_in_hdr.append_sector);
req->__sector = virtio64_to_cpu(vblk->vdev,
vbr->in_hdr.zone_append.sector);

blk_mq_end_request(req, status);
}

@@ -355,7 +371,7 @@ static int virtblk_handle_req(struct virtio_blk_vq *vq,

if (likely(!blk_should_fake_timeout(req->q)) &&
!blk_mq_complete_request_remote(req) &&
!blk_mq_add_to_batch(req, iob, vbr->status,
!blk_mq_add_to_batch(req, iob, virtblk_vbr_status(vbr),
virtblk_complete_batch))
virtblk_request_done(req);
req_done++;

@@ -550,7 +566,6 @@ static void virtio_queue_rqs(struct request **rqlist)
#ifdef CONFIG_BLK_DEV_ZONED
static void *virtblk_alloc_report_buffer(struct virtio_blk *vblk,
unsigned int nr_zones,
unsigned int zone_sectors,
size_t *buflen)
{
struct request_queue *q = vblk->disk->queue;

@@ -558,7 +573,7 @@ static void *virtblk_alloc_report_buffer(struct virtio_blk *vblk,
void *buf;

nr_zones = min_t(unsigned int, nr_zones,
get_capacity(vblk->disk) >> ilog2(zone_sectors));
get_capacity(vblk->disk) >> ilog2(vblk->zone_sectors));

bufsize = sizeof(struct virtio_blk_zone_report) +
nr_zones * sizeof(struct virtio_blk_zone_descriptor);

@@ -592,7 +607,7 @@ static int virtblk_submit_zone_report(struct virtio_blk *vblk,
return PTR_ERR(req);

vbr = blk_mq_rq_to_pdu(req);
vbr->in_hdr_len = sizeof(vbr->status);
vbr->in_hdr_len = sizeof(vbr->in_hdr.status);
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_ZONE_REPORT);
vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, sector);

@@ -601,7 +616,7 @@ static int virtblk_submit_zone_report(struct virtio_blk *vblk,
goto out;

blk_execute_rq(req, false);
err = blk_status_to_errno(virtblk_result(vbr->status));
err = blk_status_to_errno(virtblk_result(vbr->in_hdr.status));
out:
blk_mq_free_request(req);
return err;

@@ -609,29 +624,72 @@ out:

static int virtblk_parse_zone(struct virtio_blk *vblk,
struct virtio_blk_zone_descriptor *entry,
unsigned int idx, unsigned int zone_sectors,
report_zones_cb cb, void *data)
unsigned int idx, report_zones_cb cb, void *data)
{
struct blk_zone zone = { };

if (entry->z_type != VIRTIO_BLK_ZT_SWR &&
entry->z_type != VIRTIO_BLK_ZT_SWP &&
entry->z_type != VIRTIO_BLK_ZT_CONV) {
dev_err(&vblk->vdev->dev, "invalid zone type %#x\n",
entry->z_type);
return -EINVAL;
zone.start = virtio64_to_cpu(vblk->vdev, entry->z_start);
if (zone.start + vblk->zone_sectors <= get_capacity(vblk->disk))
zone.len = vblk->zone_sectors;
else
zone.len = get_capacity(vblk->disk) - zone.start;
zone.capacity = virtio64_to_cpu(vblk->vdev, entry->z_cap);
zone.wp = virtio64_to_cpu(vblk->vdev, entry->z_wp);

switch (entry->z_type) {
case VIRTIO_BLK_ZT_SWR:
zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ;
break;
case VIRTIO_BLK_ZT_SWP:
zone.type = BLK_ZONE_TYPE_SEQWRITE_PREF;
break;
case VIRTIO_BLK_ZT_CONV:
zone.type = BLK_ZONE_TYPE_CONVENTIONAL;
break;
default:
dev_err(&vblk->vdev->dev, "zone %llu: invalid type %#x\n",
zone.start, entry->z_type);
return -EIO;
}

zone.type = entry->z_type;
zone.cond = entry->z_state;
zone.len = zone_sectors;
zone.capacity = le64_to_cpu(entry->z_cap);
zone.start = le64_to_cpu(entry->z_start);
if (zone.cond == BLK_ZONE_COND_FULL)
switch (entry->z_state) {
case VIRTIO_BLK_ZS_EMPTY:
zone.cond = BLK_ZONE_COND_EMPTY;
break;
case VIRTIO_BLK_ZS_CLOSED:
zone.cond = BLK_ZONE_COND_CLOSED;
break;
case VIRTIO_BLK_ZS_FULL:
zone.cond = BLK_ZONE_COND_FULL;
zone.wp = zone.start + zone.len;
else
zone.wp = le64_to_cpu(entry->z_wp);
break;
case VIRTIO_BLK_ZS_EOPEN:
zone.cond = BLK_ZONE_COND_EXP_OPEN;
break;
case VIRTIO_BLK_ZS_IOPEN:
zone.cond = BLK_ZONE_COND_IMP_OPEN;
break;
case VIRTIO_BLK_ZS_NOT_WP:
zone.cond = BLK_ZONE_COND_NOT_WP;
break;
case VIRTIO_BLK_ZS_RDONLY:
zone.cond = BLK_ZONE_COND_READONLY;
zone.wp = ULONG_MAX;
break;
case VIRTIO_BLK_ZS_OFFLINE:
zone.cond = BLK_ZONE_COND_OFFLINE;
zone.wp = ULONG_MAX;
break;
default:
dev_err(&vblk->vdev->dev, "zone %llu: invalid condition %#x\n",
zone.start, entry->z_state);
return -EIO;
}

/*
* The callback below checks the validity of the reported
* entry data, no need to further validate it here.
*/
return cb(&zone, idx, data);
}

@@ -641,39 +699,47 @@ static int virtblk_report_zones(struct gendisk *disk, sector_t sector,
{
struct virtio_blk *vblk = disk->private_data;
struct virtio_blk_zone_report *report;
unsigned int zone_sectors = vblk->zone_sectors;
unsigned int nz, i;
int ret, zone_idx = 0;
unsigned long long nz, i;
size_t buflen;
unsigned int zone_idx = 0;
int ret;

if (WARN_ON_ONCE(!vblk->zone_sectors))
return -EOPNOTSUPP;

report = virtblk_alloc_report_buffer(vblk, nr_zones,
zone_sectors, &buflen);
report = virtblk_alloc_report_buffer(vblk, nr_zones, &buflen);
if (!report)
return -ENOMEM;

mutex_lock(&vblk->vdev_mutex);

if (!vblk->vdev) {
ret = -ENXIO;
goto fail_report;
}

while (zone_idx < nr_zones && sector < get_capacity(vblk->disk)) {
memset(report, 0, buflen);

ret = virtblk_submit_zone_report(vblk, (char *)report,
buflen, sector);
if (ret) {
if (ret > 0)
ret = -EIO;
goto out_free;
}
nz = min((unsigned int)le64_to_cpu(report->nr_zones), nr_zones);
if (ret)
goto fail_report;

nz = min_t(u64, virtio64_to_cpu(vblk->vdev, report->nr_zones),
nr_zones);
if (!nz)
break;

for (i = 0; i < nz && zone_idx < nr_zones; i++) {
ret = virtblk_parse_zone(vblk, &report->zones[i],
zone_idx, zone_sectors, cb, data);
zone_idx, cb, data);
if (ret)
goto out_free;
sector = le64_to_cpu(report->zones[i].z_start) + zone_sectors;
goto fail_report;

sector = virtio64_to_cpu(vblk->vdev,
report->zones[i].z_start) +
vblk->zone_sectors;
zone_idx++;
}
}

@@ -682,7 +748,8 @@ static int virtblk_report_zones(struct gendisk *disk, sector_t sector,
ret = zone_idx;
else
ret = -EINVAL;
out_free:
fail_report:
mutex_unlock(&vblk->vdev_mutex);
kvfree(report);
return ret;
}

@@ -691,20 +758,28 @@ static void virtblk_revalidate_zones(struct virtio_blk *vblk)
{
u8 model;

if (!vblk->zone_sectors)
return;

virtio_cread(vblk->vdev, struct virtio_blk_config,
zoned.model, &model);
if (!blk_revalidate_disk_zones(vblk->disk, NULL))
set_capacity_and_notify(vblk->disk, 0);
switch (model) {
default:
dev_err(&vblk->vdev->dev, "unknown zone model %d\n", model);
fallthrough;
case VIRTIO_BLK_Z_NONE:
case VIRTIO_BLK_Z_HA:
disk_set_zoned(vblk->disk, BLK_ZONED_NONE);
return;
case VIRTIO_BLK_Z_HM:
WARN_ON_ONCE(!vblk->zone_sectors);
if (!blk_revalidate_disk_zones(vblk->disk, NULL))
set_capacity_and_notify(vblk->disk, 0);
}
}

static int virtblk_probe_zoned_device(struct virtio_device *vdev,
struct virtio_blk *vblk,
struct request_queue *q)
{
u32 v;
u32 v, wg;
u8 model;
int ret;

@@ -713,16 +788,11 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,

switch (model) {
case VIRTIO_BLK_Z_NONE:
case VIRTIO_BLK_Z_HA:
/* Present the host-aware device as non-zoned */
return 0;
case VIRTIO_BLK_Z_HM:
break;
case VIRTIO_BLK_Z_HA:
/*
* Present the host-aware device as a regular drive.
* TODO It is possible to add an option to make it appear
* in the system as a zoned drive.
*/
return 0;
default:
dev_err(&vdev->dev, "unsupported zone model %d\n", model);
return -EINVAL;

@@ -735,32 +805,31 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,

virtio_cread(vdev, struct virtio_blk_config,
zoned.max_open_zones, &v);
disk_set_max_open_zones(vblk->disk, le32_to_cpu(v));

dev_dbg(&vdev->dev, "max open zones = %u\n", le32_to_cpu(v));
disk_set_max_open_zones(vblk->disk, v);
dev_dbg(&vdev->dev, "max open zones = %u\n", v);

virtio_cread(vdev, struct virtio_blk_config,
zoned.max_active_zones, &v);
disk_set_max_active_zones(vblk->disk, le32_to_cpu(v));
dev_dbg(&vdev->dev, "max active zones = %u\n", le32_to_cpu(v));
disk_set_max_active_zones(vblk->disk, v);
dev_dbg(&vdev->dev, "max active zones = %u\n", v);

virtio_cread(vdev, struct virtio_blk_config,
zoned.write_granularity, &v);
if (!v) {
zoned.write_granularity, &wg);
if (!wg) {
dev_warn(&vdev->dev, "zero write granularity reported\n");
return -ENODEV;
}
blk_queue_physical_block_size(q, le32_to_cpu(v));
blk_queue_io_min(q, le32_to_cpu(v));
blk_queue_physical_block_size(q, wg);
blk_queue_io_min(q, wg);

dev_dbg(&vdev->dev, "write granularity = %u\n", le32_to_cpu(v));
dev_dbg(&vdev->dev, "write granularity = %u\n", wg);

/*
* virtio ZBD specification doesn't require zones to be a power of
* two sectors in size, but the code in this driver expects that.
*/
virtio_cread(vdev, struct virtio_blk_config, zoned.zone_sectors, &v);
vblk->zone_sectors = le32_to_cpu(v);
virtio_cread(vdev, struct virtio_blk_config, zoned.zone_sectors,
&vblk->zone_sectors);
if (vblk->zone_sectors == 0 || !is_power_of_2(vblk->zone_sectors)) {
dev_err(&vdev->dev,
"zoned device with non power of two zone size %u\n",

@@ -783,36 +852,46 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
dev_warn(&vdev->dev, "zero max_append_sectors reported\n");
return -ENODEV;
}
blk_queue_max_zone_append_sectors(q, le32_to_cpu(v));
dev_dbg(&vdev->dev, "max append sectors = %u\n", le32_to_cpu(v));
if ((v << SECTOR_SHIFT) < wg) {
dev_err(&vdev->dev,
"write granularity %u exceeds max_append_sectors %u limit\n",
wg, v);
return -ENODEV;
}

blk_queue_max_zone_append_sectors(q, v);
dev_dbg(&vdev->dev, "max append sectors = %u\n", v);
}

return ret;
}

static inline bool virtblk_has_zoned_feature(struct virtio_device *vdev)
{
return virtio_has_feature(vdev, VIRTIO_BLK_F_ZONED);
}
#else

/*
* Zoned block device support is not configured in this kernel.
* We only need to define a few symbols to avoid compilation errors.
* Host-managed zoned devices can't be supported, but others are
* good to go as regular block devices.
*/
#define virtblk_report_zones NULL

static inline void virtblk_revalidate_zones(struct virtio_blk *vblk)
{
}

static inline int virtblk_probe_zoned_device(struct virtio_device *vdev,
struct virtio_blk *vblk, struct request_queue *q)
{
return -EOPNOTSUPP;
}
u8 model;

static inline bool virtblk_has_zoned_feature(struct virtio_device *vdev)
{
return false;
virtio_cread(vdev, struct virtio_blk_config, zoned.model, &model);
if (model == VIRTIO_BLK_Z_HM) {
dev_err(&vdev->dev,
"virtio_blk: zoned devices are not supported");
return -EOPNOTSUPP;
}

return 0;
}
#endif /* CONFIG_BLK_DEV_ZONED */

@@ -831,7 +910,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
return PTR_ERR(req);

vbr = blk_mq_rq_to_pdu(req);
vbr->in_hdr_len = sizeof(vbr->status);
vbr->in_hdr_len = sizeof(vbr->in_hdr.status);
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
vbr->out_hdr.sector = 0;

@@ -840,7 +919,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
goto out;

blk_execute_rq(req, false);
err = blk_status_to_errno(virtblk_result(vbr->status));
err = blk_status_to_errno(virtblk_result(vbr->in_hdr.status));
out:
blk_mq_free_request(req);
return err;

@@ -1498,15 +1577,16 @@ static int virtblk_probe(struct virtio_device *vdev)
virtblk_update_capacity(vblk, false);
virtio_device_ready(vdev);

if (virtblk_has_zoned_feature(vdev)) {
/*
* All steps that follow use the VQs therefore they need to be
* placed after the virtio_device_ready() call above.
*/
if (virtio_has_feature(vdev, VIRTIO_BLK_F_ZONED)) {
err = virtblk_probe_zoned_device(vdev, vblk, q);
if (err)
goto out_cleanup_disk;
}

dev_info(&vdev->dev, "blk config size: %zu\n",
sizeof(struct virtio_blk_config));

err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
if (err)
goto out_cleanup_disk;

@@ -1607,10 +1687,7 @@ static unsigned int features[] = {
VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
VIRTIO_BLK_F_SECURE_ERASE,
#ifdef CONFIG_BLK_DEV_ZONED
VIRTIO_BLK_F_ZONED,
#endif /* CONFIG_BLK_DEV_ZONED */
VIRTIO_BLK_F_SECURE_ERASE, VIRTIO_BLK_F_ZONED,
};

static struct virtio_driver virtio_blk = {

@@ -511,7 +511,7 @@ static const char *btbcm_get_board_name(struct device *dev)
len = strlen(tmp) + 1;
board_type = devm_kzalloc(dev, len, GFP_KERNEL);
strscpy(board_type, tmp, len);
for (i = 0; i < board_type[i]; i++) {
for (i = 0; i < len; i++) {
if (board_type[i] == '/')
board_type[i] = '-';
}

@@ -358,6 +358,7 @@ static void btsdio_remove(struct sdio_func *func)
if (!data)
return;

cancel_work_sync(&data->work);
hdev = data->hdev;

sdio_set_drvdata(func, NULL);

@@ -329,6 +329,12 @@ static int of_weim_notify(struct notifier_block *nb, unsigned long action,
"Failed to setup timing for '%pOF'\n", rd->dn);

if (!of_node_check_flag(rd->dn, OF_POPULATED)) {
/*
* Clear the flag before adding the device so that
* fw_devlink doesn't skip adding consumers to this
* device.
*/
rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
if (!of_platform_device_create(rd->dn, NULL, &pdev->dev)) {
dev_err(&pdev->dev,
"Failed to create child device '%pOF'\n",

@@ -97,10 +97,6 @@ struct quad8 {
struct quad8_reg __iomem *reg;
};

/* Borrow Toggle flip-flop */
#define QUAD8_FLAG_BT BIT(0)
/* Carry Toggle flip-flop */
#define QUAD8_FLAG_CT BIT(1)
/* Error flag */
#define QUAD8_FLAG_E BIT(4)
/* Up/Down flag */

@@ -133,6 +129,9 @@ struct quad8 {
#define QUAD8_CMR_QUADRATURE_X2 0x10
#define QUAD8_CMR_QUADRATURE_X4 0x18

/* Each Counter is 24 bits wide */
#define LS7267_CNTR_MAX GENMASK(23, 0)

static int quad8_signal_read(struct counter_device *counter,
struct counter_signal *signal,
enum counter_signal_level *level)

@@ -156,18 +155,10 @@ static int quad8_count_read(struct counter_device *counter,
{
struct quad8 *const priv = counter_priv(counter);
struct channel_reg __iomem *const chan = priv->reg->channel + count->id;
unsigned int flags;
unsigned int borrow;
unsigned int carry;
unsigned long irqflags;
int i;

flags = ioread8(&chan->control);
borrow = flags & QUAD8_FLAG_BT;
carry = !!(flags & QUAD8_FLAG_CT);

/* Borrow XOR Carry effectively doubles count range */
*val = (unsigned long)(borrow ^ carry) << 24;
*val = 0;

spin_lock_irqsave(&priv->lock, irqflags);

@@ -191,8 +182,7 @@ static int quad8_count_write(struct counter_device *counter,
unsigned long irqflags;
int i;

/* Only 24-bit values are supported */
if (val > 0xFFFFFF)
if (val > LS7267_CNTR_MAX)
return -ERANGE;

spin_lock_irqsave(&priv->lock, irqflags);

@@ -378,7 +368,7 @@ static int quad8_action_read(struct counter_device *counter,

/* Handle Index signals */
if (synapse->signal->id >= 16) {
if (priv->preset_enable[count->id])
if (!priv->preset_enable[count->id])
*action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
else
*action = COUNTER_SYNAPSE_ACTION_NONE;

@@ -806,8 +796,7 @@ static int quad8_count_preset_write(struct counter_device *counter,
struct quad8 *const priv = counter_priv(counter);
unsigned long irqflags;

/* Only 24-bit values are supported */
if (preset > 0xFFFFFF)
if (preset > LS7267_CNTR_MAX)
return -ERANGE;

spin_lock_irqsave(&priv->lock, irqflags);

@@ -834,8 +823,7 @@ static int quad8_count_ceiling_read(struct counter_device *counter,
*ceiling = priv->preset[count->id];
break;
default:
/* By default 0x1FFFFFF (25 bits unsigned) is maximum count */
*ceiling = 0x1FFFFFF;
*ceiling = LS7267_CNTR_MAX;
break;
}

@@ -850,8 +838,7 @@ static int quad8_count_ceiling_write(struct counter_device *counter,
struct quad8 *const priv = counter_priv(counter);
unsigned long irqflags;

/* Only 24-bit values are supported */
if (ceiling > 0xFFFFFF)
if (ceiling > LS7267_CNTR_MAX)
return -ERANGE;

spin_lock_irqsave(&priv->lock, irqflags);

@ -101,25 +101,40 @@ static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,
|
|||
BIT(CXL_CM_CAP_CAP_ID_HDM));
|
||||
}
|
||||
|
||||
static struct cxl_hdm *devm_cxl_setup_emulated_hdm(struct cxl_port *port,
|
||||
struct cxl_endpoint_dvsec_info *info)
|
||||
static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
|
||||
{
|
||||
struct device *dev = &port->dev;
|
||||
struct cxl_hdm *cxlhdm;
|
||||
void __iomem *hdm;
|
||||
u32 ctrl;
|
||||
int i;
|
||||
|
||||
if (!info)
|
||||
return false;
|
||||
|
||||
cxlhdm = dev_get_drvdata(&info->port->dev);
|
||||
hdm = cxlhdm->regs.hdm_decoder;
|
||||
|
||||
if (!hdm)
|
||||
return true;
|
||||
|
||||
/*
|
||||
* If HDM decoders are present and the driver is in control of
|
||||
* Mem_Enable skip DVSEC based emulation
|
||||
*/
|
||||
if (!info->mem_enabled)
|
||||
return ERR_PTR(-ENODEV);
|
||||
return false;
|
||||
|
||||
cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
|
||||
if (!cxlhdm)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
/*
|
||||
* If any decoders are committed already, there should not be any
|
||||
* emulated DVSEC decoders.
|
||||
*/
|
||||
for (i = 0; i < cxlhdm->decoder_count; i++) {
|
||||
ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
|
||||
if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
|
||||
return false;
|
||||
}
|
||||
|
||||
cxlhdm->port = port;
|
||||
cxlhdm->decoder_count = info->ranges;
|
||||
cxlhdm->target_count = info->ranges;
|
||||
dev_set_drvdata(&port->dev, cxlhdm);
|
||||
|
||||
return cxlhdm;
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -138,13 +153,14 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
|
|||
cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
|
||||
if (!cxlhdm)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
cxlhdm->port = port;
|
||||
crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
|
||||
if (!crb) {
|
||||
if (info && info->mem_enabled)
|
||||
return devm_cxl_setup_emulated_hdm(port, info);
|
||||
dev_set_drvdata(dev, cxlhdm);
|
||||
|
||||
crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
|
||||
if (!crb && info && info->mem_enabled) {
|
||||
cxlhdm->decoder_count = info->ranges;
|
||||
return cxlhdm;
|
||||
} else if (!crb) {
|
||||
dev_err(dev, "No component registers mapped\n");
|
||||
return ERR_PTR(-ENXIO);
|
||||
}
|
||||
|
@ -160,7 +176,15 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
|
|||
return ERR_PTR(-ENXIO);
|
||||
}
|
||||
|
||||
dev_set_drvdata(dev, cxlhdm);
|
||||
/*
|
||||
* Now that the hdm capability is parsed, decide if range
|
||||
* register emulation is needed and fixup cxlhdm accordingly.
|
||||
*/
|
||||
if (should_emulate_decoders(info)) {
|
||||
dev_dbg(dev, "Fallback map %d range register%s\n", info->ranges,
|
||||
info->ranges > 1 ? "s" : "");
|
||||
cxlhdm->decoder_count = info->ranges;
|
||||
}
|
||||
|
||||
return cxlhdm;
|
||||
}
|
||||
|
@@ -714,14 +738,20 @@ static int cxl_decoder_reset(struct cxl_decoder *cxld)
 	return 0;
 }
 
-static int cxl_setup_hdm_decoder_from_dvsec(struct cxl_port *port,
-					    struct cxl_decoder *cxld, int which,
-					    struct cxl_endpoint_dvsec_info *info)
+static int cxl_setup_hdm_decoder_from_dvsec(
+	struct cxl_port *port, struct cxl_decoder *cxld, u64 *dpa_base,
+	int which, struct cxl_endpoint_dvsec_info *info)
 {
+	struct cxl_endpoint_decoder *cxled;
+	u64 len;
+	int rc;
+
 	if (!is_cxl_endpoint(port))
 		return -EOPNOTSUPP;
 
-	if (!range_len(&info->dvsec_range[which]))
+	cxled = to_cxl_endpoint_decoder(&cxld->dev);
+	len = range_len(&info->dvsec_range[which]);
+	if (!len)
 		return -ENOENT;
 
 	cxld->target_type = CXL_DECODER_EXPANDER;

@@ -736,40 +766,24 @@ static int cxl_setup_hdm_decoder_from_dvsec(struct cxl_port *port,
 	cxld->flags |= CXL_DECODER_F_ENABLE | CXL_DECODER_F_LOCK;
 	port->commit_end = cxld->id;
 
-	return 0;
-}
-
-static bool should_emulate_decoders(struct cxl_port *port)
-{
-	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
-	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
-	u32 ctrl;
-	int i;
-
-	if (!is_cxl_endpoint(cxlhdm->port))
-		return false;
-
-	if (!hdm)
-		return true;
-
-	/*
-	 * If any decoders are committed already, there should not be any
-	 * emulated DVSEC decoders.
-	 */
-	for (i = 0; i < cxlhdm->decoder_count; i++) {
-		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
-		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
-			return false;
+	rc = devm_cxl_dpa_reserve(cxled, *dpa_base, len, 0);
+	if (rc) {
+		dev_err(&port->dev,
+			"decoder%d.%d: Failed to reserve DPA range %#llx - %#llx\n (%d)",
+			port->id, cxld->id, *dpa_base, *dpa_base + len - 1, rc);
+		return rc;
 	}
+	*dpa_base += len;
+	cxled->state = CXL_DECODER_STATE_AUTO;
 
-	return true;
+	return 0;
 }
 
 static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
 			    int *target_map, void __iomem *hdm, int which,
 			    u64 *dpa_base, struct cxl_endpoint_dvsec_info *info)
 {
-	struct cxl_endpoint_decoder *cxled = NULL;
+	struct cxl_endpoint_decoder *cxled;
 	u64 size, base, skip, dpa_size;
 	bool committed;
 	u32 remainder;
@@ -780,11 +794,9 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
 		unsigned char target_id[8];
 	} target_list;
 
-	if (should_emulate_decoders(port))
-		return cxl_setup_hdm_decoder_from_dvsec(port, cxld, which, info);
-
-	if (is_endpoint_decoder(&cxld->dev))
-		cxled = to_cxl_endpoint_decoder(&cxld->dev);
+	if (should_emulate_decoders(info))
+		return cxl_setup_hdm_decoder_from_dvsec(port, cxld, dpa_base,
+							which, info);
 
 	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
 	base = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));

@@ -806,9 +818,6 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
 		.end = base + size - 1,
 	};
 
-	if (cxled && !committed && range_len(&info->dvsec_range[which]))
-		return cxl_setup_hdm_decoder_from_dvsec(port, cxld, which, info);
-
 	/* decoders are enabled if committed */
 	if (committed) {
 		cxld->flags |= CXL_DECODER_F_ENABLE;

@@ -846,7 +855,7 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
 	if (rc)
 		return rc;
 
-	if (!cxled) {
+	if (!info) {
 		target_list.value =
 			ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which));
 		for (i = 0; i < cxld->interleave_ways; i++)

@@ -866,6 +875,7 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
 		return -ENXIO;
 	}
 	skip = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
+	cxled = to_cxl_endpoint_decoder(&cxld->dev);
 	rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
 	if (rc) {
 		dev_err(&port->dev,
@@ -462,7 +462,7 @@ static struct pci_doe_mb *find_cdat_doe(struct device *uport)
 	return NULL;
 }
 
-#define CDAT_DOE_REQ(entry_handle)					\
+#define CDAT_DOE_REQ(entry_handle) cpu_to_le32				\
 	(FIELD_PREP(CXL_DOE_TABLE_ACCESS_REQ_CODE,			\
 		    CXL_DOE_TABLE_ACCESS_REQ_CODE_READ) |		\
 	 FIELD_PREP(CXL_DOE_TABLE_ACCESS_TABLE_TYPE,			\

@@ -475,8 +475,8 @@ static void cxl_doe_task_complete(struct pci_doe_task *task)
 }
 
 struct cdat_doe_task {
-	u32 request_pl;
-	u32 response_pl[32];
+	__le32 request_pl;
+	__le32 response_pl[32];
 	struct completion c;
 	struct pci_doe_task task;
 };

@@ -510,10 +510,10 @@ static int cxl_cdat_get_length(struct device *dev,
 		return rc;
 	}
 	wait_for_completion(&t.c);
-	if (t.task.rv < sizeof(u32))
+	if (t.task.rv < 2 * sizeof(__le32))
 		return -EIO;
 
-	*length = t.response_pl[1];
+	*length = le32_to_cpu(t.response_pl[1]);
 	dev_dbg(dev, "CDAT length %zu\n", *length);
 
 	return 0;

@@ -524,13 +524,13 @@ static int cxl_cdat_read_table(struct device *dev,
 			       struct cxl_cdat *cdat)
 {
 	size_t length = cdat->length;
-	u32 *data = cdat->table;
+	__le32 *data = cdat->table;
 	int entry_handle = 0;
 
 	do {
 		DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(entry_handle), t);
+		struct cdat_entry_header *entry;
 		size_t entry_dw;
-		u32 *entry;
 		int rc;
 
 		rc = pci_doe_submit_task(cdat_doe, &t.task);

@@ -539,26 +539,34 @@ static int cxl_cdat_read_table(struct device *dev,
 			return rc;
 		}
 		wait_for_completion(&t.c);
-		/* 1 DW header + 1 DW data min */
-		if (t.task.rv < (2 * sizeof(u32)))
+
+		/* 1 DW Table Access Response Header + CDAT entry */
+		entry = (struct cdat_entry_header *)(t.response_pl + 1);
+		if ((entry_handle == 0 &&
+		     t.task.rv != sizeof(__le32) + sizeof(struct cdat_header)) ||
+		    (entry_handle > 0 &&
+		     (t.task.rv < sizeof(__le32) + sizeof(*entry) ||
+		      t.task.rv != sizeof(__le32) + le16_to_cpu(entry->length))))
 			return -EIO;
 
 		/* Get the CXL table access header entry handle */
 		entry_handle = FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE,
-					 t.response_pl[0]);
-		entry = t.response_pl + 1;
-		entry_dw = t.task.rv / sizeof(u32);
+					 le32_to_cpu(t.response_pl[0]));
+		entry_dw = t.task.rv / sizeof(__le32);
 		/* Skip Header */
 		entry_dw -= 1;
-		entry_dw = min(length / sizeof(u32), entry_dw);
+		entry_dw = min(length / sizeof(__le32), entry_dw);
 		/* Prevent length < 1 DW from causing a buffer overflow */
 		if (entry_dw) {
-			memcpy(data, entry, entry_dw * sizeof(u32));
-			length -= entry_dw * sizeof(u32);
+			memcpy(data, entry, entry_dw * sizeof(__le32));
+			length -= entry_dw * sizeof(__le32);
 			data += entry_dw;
 		}
 	} while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY);
 
 	/* Length in CDAT header may exceed concatenation of CDAT entries */
 	cdat->length -= length;
 
 	return 0;
 }
@@ -62,9 +62,9 @@ static int match_nvdimm_bridge(struct device *dev, void *data)
 	return is_cxl_nvdimm_bridge(dev);
 }
 
-struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct device *start)
+struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd)
 {
-	struct cxl_port *port = find_cxl_root(start);
+	struct cxl_port *port = find_cxl_root(dev_get_drvdata(&cxlmd->dev));
 	struct device *dev;
 
 	if (!port)

@@ -253,7 +253,7 @@ int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd)
 	struct device *dev;
 	int rc;
 
-	cxl_nvb = cxl_find_nvdimm_bridge(&cxlmd->dev);
+	cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
 	if (!cxl_nvb)
 		return -ENODEV;
@@ -823,41 +823,17 @@ static bool dev_is_cxl_root_child(struct device *dev)
 	return false;
 }
 
-/* Find a 2nd level CXL port that has a dport that is an ancestor of @match */
-static int match_root_child(struct device *dev, const void *match)
+struct cxl_port *find_cxl_root(struct cxl_port *port)
 {
-	const struct device *iter = NULL;
-	struct cxl_dport *dport;
-	struct cxl_port *port;
+	struct cxl_port *iter = port;
 
-	if (!dev_is_cxl_root_child(dev))
-		return 0;
+	while (iter && !is_cxl_root(iter))
+		iter = to_cxl_port(iter->dev.parent);
 
-	port = to_cxl_port(dev);
-	iter = match;
-	while (iter) {
-		dport = cxl_find_dport_by_dev(port, iter);
-		if (dport)
-			break;
-		iter = iter->parent;
-	}
-
-	return !!iter;
-}
-
-struct cxl_port *find_cxl_root(struct device *dev)
-{
-	struct device *port_dev;
-	struct cxl_port *root;
-
-	port_dev = bus_find_device(&cxl_bus_type, NULL, dev, match_root_child);
-	if (!port_dev)
+	if (!iter)
 		return NULL;
 
-	root = to_cxl_port(port_dev->parent);
-	get_device(&root->dev);
-	put_device(port_dev);
-	return root;
+	get_device(&iter->dev);
+	return iter;
 }
 EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL);
@@ -134,9 +134,13 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
 		struct cxl_endpoint_decoder *cxled = p->targets[i];
 		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
 		struct cxl_port *iter = cxled_to_port(cxled);
+		struct cxl_dev_state *cxlds = cxlmd->cxlds;
 		struct cxl_ep *ep;
 		int rc = 0;
 
+		if (cxlds->rcd)
+			goto endpoint_reset;
+
 		while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
 			iter = to_cxl_port(iter->dev.parent);

@@ -153,6 +157,7 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
 				return rc;
 		}
 
+endpoint_reset:
 		rc = cxled->cxld.reset(&cxled->cxld);
 		if (rc)
 			return rc;

@@ -1199,6 +1204,7 @@ static void cxl_region_teardown_targets(struct cxl_region *cxlr)
 {
 	struct cxl_region_params *p = &cxlr->params;
 	struct cxl_endpoint_decoder *cxled;
+	struct cxl_dev_state *cxlds;
 	struct cxl_memdev *cxlmd;
 	struct cxl_port *iter;
 	struct cxl_ep *ep;

@@ -1214,6 +1220,10 @@ static void cxl_region_teardown_targets(struct cxl_region *cxlr)
 	for (i = 0; i < p->nr_targets; i++) {
 		cxled = p->targets[i];
 		cxlmd = cxled_to_memdev(cxled);
+		cxlds = cxlmd->cxlds;
+
+		if (cxlds->rcd)
+			continue;
 
 		iter = cxled_to_port(cxled);
 		while (!is_cxl_root(to_cxl_port(iter->dev.parent)))

@@ -1229,14 +1239,24 @@ static int cxl_region_setup_targets(struct cxl_region *cxlr)
 {
 	struct cxl_region_params *p = &cxlr->params;
 	struct cxl_endpoint_decoder *cxled;
+	struct cxl_dev_state *cxlds;
+	int i, rc, rch = 0, vh = 0;
 	struct cxl_memdev *cxlmd;
 	struct cxl_port *iter;
 	struct cxl_ep *ep;
-	int i, rc;
 
 	for (i = 0; i < p->nr_targets; i++) {
 		cxled = p->targets[i];
 		cxlmd = cxled_to_memdev(cxled);
+		cxlds = cxlmd->cxlds;
+
+		/* validate that all targets agree on topology */
+		if (!cxlds->rcd) {
+			vh++;
+		} else {
+			rch++;
+			continue;
+		}
 
 		iter = cxled_to_port(cxled);
 		while (!is_cxl_root(to_cxl_port(iter->dev.parent)))

@@ -1256,6 +1276,12 @@ static int cxl_region_setup_targets(struct cxl_region *cxlr)
 		}
 	}
 
+	if (rch && vh) {
+		dev_err(&cxlr->dev, "mismatched CXL topologies detected\n");
+		cxl_region_teardown_targets(cxlr);
+		return -ENXIO;
+	}
+
 	return 0;
 }

@@ -1648,6 +1674,7 @@ static int cxl_region_attach(struct cxl_region *cxlr,
 		if (rc)
 			goto err_decrement;
 		p->state = CXL_CONFIG_ACTIVE;
+		set_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
 	}
 
 	cxled->cxld.interleave_ways = p->interleave_ways;

@@ -1749,8 +1776,6 @@ static int attach_target(struct cxl_region *cxlr,
 
 	down_read(&cxl_dpa_rwsem);
 	rc = cxl_region_attach(cxlr, cxled, pos);
-	if (rc == 0)
-		set_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
 	up_read(&cxl_dpa_rwsem);
 	up_write(&cxl_region_rwsem);
 	return rc;

@@ -2251,7 +2276,7 @@ static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
 		 * bridge for one device is the same for all.
 		 */
 		if (i == 0) {
-			cxl_nvb = cxl_find_nvdimm_bridge(&cxlmd->dev);
+			cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
 			if (!cxl_nvb) {
 				cxlr_pmem = ERR_PTR(-ENODEV);
 				goto out;
@@ -658,7 +658,7 @@ struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port);
 struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
 				   resource_size_t component_reg_phys,
 				   struct cxl_dport *parent_dport);
-struct cxl_port *find_cxl_root(struct device *dev);
+struct cxl_port *find_cxl_root(struct cxl_port *port);
 int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd);
 void cxl_bus_rescan(void);
 void cxl_bus_drain(void);

@@ -695,13 +695,15 @@ int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
 
 /**
  * struct cxl_endpoint_dvsec_info - Cached DVSEC info
- * @mem_enabled: cached value of mem_enabled in the DVSEC, PCIE_DEVICE
+ * @mem_enabled: cached value of mem_enabled in the DVSEC at init time
  * @ranges: Number of active HDM ranges this device uses.
+ * @port: endpoint port associated with this info instance
  * @dvsec_range: cached attributes of the ranges in the DVSEC, PCIE_DEVICE
  */
 struct cxl_endpoint_dvsec_info {
 	bool mem_enabled;
 	int ranges;
+	struct cxl_port *port;
 	struct range dvsec_range[2];
 };

@@ -758,7 +760,7 @@ struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev);
 bool is_cxl_nvdimm(struct device *dev);
 bool is_cxl_nvdimm_bridge(struct device *dev);
 int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd);
-struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct device *dev);
+struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd);
 
 #ifdef CONFIG_CXL_REGION
 bool is_cxl_pmem_region(struct device *dev);
@@ -68,6 +68,20 @@ enum cxl_regloc_type {
 	CXL_REGLOC_RBI_TYPES
 };
 
+struct cdat_header {
+	__le32 length;
+	u8 revision;
+	u8 checksum;
+	u8 reserved[6];
+	__le32 sequence;
+} __packed;
+
+struct cdat_entry_header {
+	u8 type;
+	u8 reserved;
+	__le16 length;
+} __packed;
+
 int devm_cxl_port_enumerate_dports(struct cxl_port *port);
 struct cxl_dev_state;
 int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
@@ -78,8 +78,8 @@ static int cxl_switch_port_probe(struct cxl_port *port)
 
 static int cxl_endpoint_port_probe(struct cxl_port *port)
 {
+	struct cxl_endpoint_dvsec_info info = { .port = port };
 	struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport);
-	struct cxl_endpoint_dvsec_info info = { 0 };
 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
 	struct cxl_hdm *cxlhdm;
 	struct cxl_port *root;

@@ -119,7 +119,7 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
 	 * This can't fail in practice as CXL root exit unregisters all
 	 * descendant ports and that in turn synchronizes with cxl_port_probe()
 	 */
-	root = find_cxl_root(&cxlmd->dev);
+	root = find_cxl_root(port);
 
 	/*
 	 * Now that all endpoint decoders are successfully enumerated, try to
@@ -75,6 +75,7 @@
 
 #define REG_TX_INTSTATE(idx) (0x0030 + (idx) * 4)
 #define REG_RX_INTSTATE(idx) (0x0040 + (idx) * 4)
+#define REG_GLOBAL_INTSTATE(idx) (0x0050 + (idx) * 4)
 #define REG_CHAN_INTSTATUS(ch, idx) (0x8010 + (ch) * 0x200 + (idx) * 4)
 #define REG_CHAN_INTMASK(ch, idx) (0x8020 + (ch) * 0x200 + (idx) * 4)

@@ -511,7 +512,10 @@ static int admac_terminate_all(struct dma_chan *chan)
 	admac_stop_chan(adchan);
 	admac_reset_rings(adchan);
 
-	adchan->current_tx = NULL;
+	if (adchan->current_tx) {
+		list_add_tail(&adchan->current_tx->node, &adchan->to_free);
+		adchan->current_tx = NULL;
+	}
 	/*
 	 * Descriptors can only be freed after the tasklet
 	 * has been killed (in admac_synchronize).

@@ -672,13 +676,14 @@ static void admac_handle_chan_int(struct admac_data *ad, int no)
 static irqreturn_t admac_interrupt(int irq, void *devid)
 {
 	struct admac_data *ad = devid;
-	u32 rx_intstate, tx_intstate;
+	u32 rx_intstate, tx_intstate, global_intstate;
 	int i;
 
 	rx_intstate = readl_relaxed(ad->base + REG_RX_INTSTATE(ad->irq_index));
 	tx_intstate = readl_relaxed(ad->base + REG_TX_INTSTATE(ad->irq_index));
+	global_intstate = readl_relaxed(ad->base + REG_GLOBAL_INTSTATE(ad->irq_index));
 
-	if (!tx_intstate && !rx_intstate)
+	if (!tx_intstate && !rx_intstate && !global_intstate)
 		return IRQ_NONE;
 
 	for (i = 0; i < ad->nchannels; i += 2) {

@@ -693,6 +698,12 @@ static irqreturn_t admac_interrupt(int irq, void *devid)
 		rx_intstate >>= 1;
 	}
 
+	if (global_intstate) {
+		dev_warn(ad->dev, "clearing unknown global interrupt flag: %x\n",
+			 global_intstate);
+		writel_relaxed(~(u32) 0, ad->base + REG_GLOBAL_INTSTATE(ad->irq_index));
+	}
+
 	return IRQ_HANDLED;
 }

@@ -850,6 +861,9 @@ static int admac_probe(struct platform_device *pdev)
 
 	dma->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
 	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+	dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+			BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+			BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
 	dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
 			BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
 			BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
@@ -1342,7 +1342,7 @@ int dmaenginem_async_device_register(struct dma_device *device)
 	if (ret)
 		return ret;
 
-	return devm_add_action(device->dev, dmaenginem_async_device_unregister, device);
+	return devm_add_action_or_reset(device->dev, dmaenginem_async_device_unregister, device);
 }
 EXPORT_SYMBOL(dmaenginem_async_device_register);

@@ -277,7 +277,7 @@ failed:
 
 /**
  * xdma_xfer_start - Start DMA transfer
- * @xdma_chan: DMA channel pointer
+ * @xchan: DMA channel pointer
  */
 static int xdma_xfer_start(struct xdma_chan *xchan)
 {

@@ -100,7 +100,7 @@ config GPIO_GENERIC
 	tristate
 
 config GPIO_REGMAP
-	depends on REGMAP
+	select REGMAP
 	tristate
 
 # put drivers in the right section, in alphabetical order

@@ -324,7 +324,7 @@ static struct irq_chip gpio_irqchip = {
 	.irq_enable	= gpio_irq_enable,
 	.irq_disable	= gpio_irq_disable,
 	.irq_set_type	= gpio_irq_type,
-	.flags		= IRQCHIP_SET_TYPE_MASKED,
+	.flags		= IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE,
 };
 
 static void gpio_irq_handler(struct irq_desc *desc)

@@ -641,9 +641,6 @@ static void davinci_gpio_save_context(struct davinci_gpio_controller *chips,
 			context->set_falling = readl_relaxed(&g->set_falling);
 	}
 
-	/* Clear Bank interrupt enable bit */
-	writel_relaxed(0, base + BINTEN);
-
 	/* Clear all interrupt status registers */
 	writel_relaxed(GENMASK(31, 0), &g->intstat);
 }
@@ -177,6 +177,40 @@ void dm_helpers_dp_update_branch_info(
 		const struct dc_link *link)
 {}
 
+static void dm_helpers_construct_old_payload(
+			struct dc_link *link,
+			int pbn_per_slot,
+			struct drm_dp_mst_atomic_payload *new_payload,
+			struct drm_dp_mst_atomic_payload *old_payload)
+{
+	struct link_mst_stream_allocation_table current_link_table =
+									link->mst_stream_alloc_table;
+	struct link_mst_stream_allocation *dc_alloc;
+	int i;
+
+	*old_payload = *new_payload;
+
+	/* Set correct time_slots/PBN of old payload.
+	 * other fields (delete & dsc_enabled) in
+	 * struct drm_dp_mst_atomic_payload are don't care fields
+	 * while calling drm_dp_remove_payload()
+	 */
+	for (i = 0; i < current_link_table.stream_count; i++) {
+		dc_alloc =
+			&current_link_table.stream_allocations[i];
+
+		if (dc_alloc->vcp_id == new_payload->vcpi) {
+			old_payload->time_slots = dc_alloc->slot_count;
+			old_payload->pbn = dc_alloc->slot_count * pbn_per_slot;
+			break;
+		}
+	}
+
+	/* make sure there is an old payload*/
+	ASSERT(i != current_link_table.stream_count);
+
+}
+
 /*
  * Writes payload allocation table in immediate downstream device.
  */

@@ -188,7 +222,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
 {
 	struct amdgpu_dm_connector *aconnector;
 	struct drm_dp_mst_topology_state *mst_state;
-	struct drm_dp_mst_atomic_payload *payload;
+	struct drm_dp_mst_atomic_payload *target_payload, *new_payload, old_payload;
 	struct drm_dp_mst_topology_mgr *mst_mgr;
 
 	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

@@ -204,17 +238,26 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
 	mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
 
 	/* It's OK for this to fail */
-	payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
-	if (enable)
-		drm_dp_add_payload_part1(mst_mgr, mst_state, payload);
-	else
-		drm_dp_remove_payload(mst_mgr, mst_state, payload, payload);
+	new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
+
+	if (enable) {
+		target_payload = new_payload;
+
+		drm_dp_add_payload_part1(mst_mgr, mst_state, new_payload);
+	} else {
+		/* construct old payload by VCPI*/
+		dm_helpers_construct_old_payload(stream->link, mst_state->pbn_div,
+						 new_payload, &old_payload);
+		target_payload = &old_payload;
+
+		drm_dp_remove_payload(mst_mgr, mst_state, &old_payload, new_payload);
+	}
 
 	/* mst_mgr->->payloads are VC payload notify MST branch using DPCD or
 	 * AUX message. The sequence is slot 1-63 allocated sequence for each
 	 * stream. AMD ASIC stream slot allocation should follow the same
 	 * sequence. copy DRM MST allocation to dc */
-	fill_dc_mst_payload_table_from_drm(stream->link, enable, payload, proposed_table);
+	fill_dc_mst_payload_table_from_drm(stream->link, enable, target_payload, proposed_table);
 
 	return true;
 }
@@ -61,6 +61,12 @@
 #define CTF_OFFSET_HOTSPOT			5
 #define CTF_OFFSET_MEM				5
 
+static const int pmfw_decoded_link_speed[5] = {1, 2, 3, 4, 5};
+static const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16};
+
+#define DECODE_GEN_SPEED(gen_speed_idx)		(pmfw_decoded_link_speed[gen_speed_idx])
+#define DECODE_LANE_WIDTH(lane_width_idx)	(pmfw_decoded_link_width[lane_width_idx])
+
 struct smu_13_0_max_sustainable_clocks {
 	uint32_t display_clock;
 	uint32_t phy_clock;

@@ -1144,8 +1144,8 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
 					(pcie_table->pcie_lane[i] == 5) ? "x12" :
 					(pcie_table->pcie_lane[i] == 6) ? "x16" : "",
 					pcie_table->clk_freq[i],
-					((gen_speed - 1) == pcie_table->pcie_gen[i]) &&
-					(lane_width == link_width[pcie_table->pcie_lane[i]]) ?
+					(gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) &&
+					(lane_width == DECODE_LANE_WIDTH(link_width[pcie_table->pcie_lane[i]])) ?
 					"*" : "");
 			break;
@@ -575,6 +575,14 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
 						     dpm_table);
 		if (ret)
 			return ret;
+
+		if (skutable->DriverReportedClocks.GameClockAc &&
+		    (dpm_table->dpm_levels[dpm_table->count - 1].value >
+		    skutable->DriverReportedClocks.GameClockAc)) {
+			dpm_table->dpm_levels[dpm_table->count - 1].value =
+				skutable->DriverReportedClocks.GameClockAc;
+			dpm_table->max = skutable->DriverReportedClocks.GameClockAc;
+		}
 	} else {
 		dpm_table->count = 1;
 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;

@@ -828,6 +836,57 @@ static int smu_v13_0_7_get_smu_metrics_data(struct smu_context *smu,
 	return ret;
 }
 
+static int smu_v13_0_7_get_dpm_ultimate_freq(struct smu_context *smu,
+					     enum smu_clk_type clk_type,
+					     uint32_t *min,
+					     uint32_t *max)
+{
+	struct smu_13_0_dpm_context *dpm_context =
+		smu->smu_dpm.dpm_context;
+	struct smu_13_0_dpm_table *dpm_table;
+
+	switch (clk_type) {
+	case SMU_MCLK:
+	case SMU_UCLK:
+		/* uclk dpm table */
+		dpm_table = &dpm_context->dpm_tables.uclk_table;
+		break;
+	case SMU_GFXCLK:
+	case SMU_SCLK:
+		/* gfxclk dpm table */
+		dpm_table = &dpm_context->dpm_tables.gfx_table;
+		break;
+	case SMU_SOCCLK:
+		/* socclk dpm table */
+		dpm_table = &dpm_context->dpm_tables.soc_table;
+		break;
+	case SMU_FCLK:
+		/* fclk dpm table */
+		dpm_table = &dpm_context->dpm_tables.fclk_table;
+		break;
+	case SMU_VCLK:
+	case SMU_VCLK1:
+		/* vclk dpm table */
+		dpm_table = &dpm_context->dpm_tables.vclk_table;
+		break;
+	case SMU_DCLK:
+	case SMU_DCLK1:
+		/* dclk dpm table */
+		dpm_table = &dpm_context->dpm_tables.dclk_table;
+		break;
+	default:
+		dev_err(smu->adev->dev, "Unsupported clock type!\n");
+		return -EINVAL;
+	}
+
+	if (min)
+		*min = dpm_table->min;
+	if (max)
+		*max = dpm_table->max;
+
+	return 0;
+}
+
 static int smu_v13_0_7_read_sensor(struct smu_context *smu,
 				   enum amd_pp_sensors sensor,
 				   void *data,

@@ -1074,8 +1133,8 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
 					(pcie_table->pcie_lane[i] == 5) ? "x12" :
 					(pcie_table->pcie_lane[i] == 6) ? "x16" : "",
 					pcie_table->clk_freq[i],
-					(gen_speed == pcie_table->pcie_gen[i]) &&
-					(lane_width == pcie_table->pcie_lane[i]) ?
+					(gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) &&
+					(lane_width == DECODE_LANE_WIDTH(pcie_table->pcie_lane[i])) ?
 					"*" : "");
 			break;

@@ -1329,9 +1388,17 @@ static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu)
 		&dpm_context->dpm_tables.fclk_table;
 	struct smu_umd_pstate_table *pstate_table =
 		&smu->pstate_table;
+	struct smu_table_context *table_context = &smu->smu_table;
+	PPTable_t *pptable = table_context->driver_pptable;
+	DriverReportedClocks_t driver_clocks =
+		pptable->SkuTable.DriverReportedClocks;
 
 	pstate_table->gfxclk_pstate.min = gfx_table->min;
-	pstate_table->gfxclk_pstate.peak = gfx_table->max;
+	if (driver_clocks.GameClockAc &&
+	    (driver_clocks.GameClockAc < gfx_table->max))
+		pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc;
+	else
+		pstate_table->gfxclk_pstate.peak = gfx_table->max;
 
 	pstate_table->uclk_pstate.min = mem_table->min;
 	pstate_table->uclk_pstate.peak = mem_table->max;

@@ -1348,12 +1415,12 @@ static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu)
 	pstate_table->fclk_pstate.min = fclk_table->min;
 	pstate_table->fclk_pstate.peak = fclk_table->max;
 
-	/*
-	 * For now, just use the mininum clock frequency.
-	 * TODO: update them when the real pstate settings available
-	 */
-	pstate_table->gfxclk_pstate.standard = gfx_table->min;
-	pstate_table->uclk_pstate.standard = mem_table->min;
+	if (driver_clocks.BaseClockAc &&
+	    driver_clocks.BaseClockAc < gfx_table->max)
+		pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc;
+	else
+		pstate_table->gfxclk_pstate.standard = gfx_table->max;
+	pstate_table->uclk_pstate.standard = mem_table->max;
 	pstate_table->socclk_pstate.standard = soc_table->min;
 	pstate_table->vclk_pstate.standard = vclk_table->min;
 	pstate_table->dclk_pstate.standard = dclk_table->min;

@@ -1676,7 +1743,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
 	.dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable,
 	.init_pptable_microcode = smu_v13_0_init_pptable_microcode,
 	.populate_umd_state_clk = smu_v13_0_7_populate_umd_state_clk,
-	.get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq,
+	.get_dpm_ultimate_freq = smu_v13_0_7_get_dpm_ultimate_freq,
 	.get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
 	.read_sensor = smu_v13_0_7_read_sensor,
 	.feature_is_enabled = smu_cmn_feature_is_enabled,
@@ -99,7 +99,6 @@ static int armada_drm_bind(struct device *dev)
 	if (ret) {
 		dev_err(dev, "[" DRM_NAME ":%s] can't kick out simple-fb: %d\n",
 			__func__, ret);
-		kfree(priv);
 		return ret;
 	}

@@ -300,9 +300,21 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+	i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
 	u32 dss_ctl1;
 
-	dss_ctl1 = intel_de_read(dev_priv, DSS_CTL1);
+	/* FIXME: Move all DSS handling to intel_vdsc.c */
+	if (DISPLAY_VER(dev_priv) >= 12) {
+		struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+
+		dss_ctl1_reg = ICL_PIPE_DSS_CTL1(crtc->pipe);
+		dss_ctl2_reg = ICL_PIPE_DSS_CTL2(crtc->pipe);
+	} else {
+		dss_ctl1_reg = DSS_CTL1;
+		dss_ctl2_reg = DSS_CTL2;
+	}
+
+	dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg);
 	dss_ctl1 |= SPLITTER_ENABLE;
 	dss_ctl1 &= ~OVERLAP_PIXELS_MASK;
 	dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap);

@@ -323,16 +335,16 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
 
 		dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK;
 		dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
-		dss_ctl2 = intel_de_read(dev_priv, DSS_CTL2);
+		dss_ctl2 = intel_de_read(dev_priv, dss_ctl2_reg);
 		dss_ctl2 &= ~RIGHT_DL_BUF_TARGET_DEPTH_MASK;
 		dss_ctl2 |= RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
-		intel_de_write(dev_priv, DSS_CTL2, dss_ctl2);
+		intel_de_write(dev_priv, dss_ctl2_reg, dss_ctl2);
 	} else {
 		/* Interleave */
 		dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE;
 	}
 
-	intel_de_write(dev_priv, DSS_CTL1, dss_ctl1);
+	intel_de_write(dev_priv, dss_ctl1_reg, dss_ctl1);
 }
 
 /* aka DSI 8X clock */
@@ -31,6 +31,7 @@ gf108_fb = {
 	.init = gf100_fb_init,
 	.init_page = gf100_fb_init_page,
 	.intr = gf100_fb_intr,
+	.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
 	.ram_new = gf108_ram_new,
 	.default_bigpage = 17,
 };

@@ -77,6 +77,7 @@ gk104_fb = {
 	.init = gf100_fb_init,
 	.init_page = gf100_fb_init_page,
 	.intr = gf100_fb_intr,
+	.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
 	.ram_new = gk104_ram_new,
 	.default_bigpage = 17,
 	.clkgate_pack = gk104_fb_clkgate_pack,

@@ -59,6 +59,7 @@ gk110_fb = {
 	.init = gf100_fb_init,
 	.init_page = gf100_fb_init_page,
 	.intr = gf100_fb_intr,
+	.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
 	.ram_new = gk104_ram_new,
 	.default_bigpage = 17,
 	.clkgate_pack = gk110_fb_clkgate_pack,

@@ -31,6 +31,7 @@ gm107_fb = {
 	.init = gf100_fb_init,
 	.init_page = gf100_fb_init_page,
 	.intr = gf100_fb_intr,
+	.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
 	.ram_new = gm107_ram_new,
 	.default_bigpage = 17,
 };
@@ -507,12 +507,19 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
 {
 	struct drm_sched_entity *entity = sched_job->entity;
 	bool first;
+	ktime_t submit_ts;
 
 	trace_drm_sched_job(sched_job, entity);
 	atomic_inc(entity->rq->sched->score);
 	WRITE_ONCE(entity->last_user, current->group_leader);
+
+	/*
+	 * After the sched_job is pushed into the entity queue, it may be
+	 * completed and freed up at any time. We can no longer access it.
+	 * Make sure to set the submit_ts first, to avoid a race.
+	 */
+	sched_job->submit_ts = submit_ts = ktime_get();
 	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
-	sched_job->submit_ts = ktime_get();
 
 	/* first job wakes up scheduler */
 	if (first) {

@@ -529,7 +536,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
 		spin_unlock(&entity->rq_lock);
 
 		if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
-			drm_sched_rq_update_fifo(entity, sched_job->submit_ts);
+			drm_sched_rq_update_fifo(entity, submit_ts);
 
 		drm_sched_wakeup(entity->rq->sched);
 	}
@@ -1122,7 +1122,7 @@ config HID_TOPRE
 	tristate "Topre REALFORCE keyboards"
 	depends on HID
 	help
-	  Say Y for N-key rollover support on Topre REALFORCE R2 108 key keyboards.
+	  Say Y for N-key rollover support on Topre REALFORCE R2 108/87 key keyboards.
 
 config HID_THINGM
 	tristate "ThingM blink(1) USB RGB LED"

@@ -420,6 +420,9 @@
 #define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN	0x261A
 #define I2C_DEVICE_ID_SURFACE_GO2_TOUCHSCREEN	0x2A1C
 #define I2C_DEVICE_ID_LENOVO_YOGA_C630_TOUCHSCREEN	0x279F
+#define I2C_DEVICE_ID_HP_SPECTRE_X360_13T_AW100	0x29F5
+#define I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V1	0x2BED
+#define I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V2	0x2BEE
 
 #define USB_VENDOR_ID_ELECOM		0x056e
 #define USB_DEVICE_ID_ELECOM_BM084	0x0061

@@ -1249,6 +1252,7 @@
 
 #define USB_VENDOR_ID_TOPRE			0x0853
 #define USB_DEVICE_ID_TOPRE_REALFORCE_R2_108			0x0148
+#define USB_DEVICE_ID_TOPRE_REALFORCE_R2_87			0x0146
 
 #define USB_VENDOR_ID_TOPSEED		0x0766
 #define USB_DEVICE_ID_TOPSEED_CYBERLINK	0x0204

@@ -398,6 +398,12 @@ static const struct hid_device_id hid_battery_quirks[] = {
 	  HID_BATTERY_QUIRK_IGNORE },
 	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_LENOVO_YOGA_C630_TOUCHSCREEN),
 	  HID_BATTERY_QUIRK_IGNORE },
+	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_13T_AW100),
+	  HID_BATTERY_QUIRK_IGNORE },
+	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V1),
+	  HID_BATTERY_QUIRK_IGNORE },
+	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V2),
+	  HID_BATTERY_QUIRK_IGNORE },
 	{}
 };

@@ -940,7 +940,7 @@ hid_sensor_register_platform_device(struct platform_device *pdev,
 				    struct hid_sensor_hub_device *hsdev,
 				    const struct hid_sensor_custom_match *match)
 {
-	char real_usage[HID_SENSOR_USAGE_LENGTH];
+	char real_usage[HID_SENSOR_USAGE_LENGTH] = { 0 };
 	struct platform_device *custom_pdev;
 	const char *dev_name;
 	char *c;

@@ -36,6 +36,8 @@ static __u8 *topre_report_fixup(struct hid_device *hdev, __u8 *rdesc,
 static const struct hid_device_id topre_id_table[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_TOPRE,
 			 USB_DEVICE_ID_TOPRE_REALFORCE_R2_108) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_TOPRE,
+			 USB_DEVICE_ID_TOPRE_REALFORCE_R2_87) },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, topre_id_table);
@@ -241,8 +241,8 @@ static int ishtp_cl_bus_match(struct device *dev, struct device_driver *drv)
 	struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
 	struct ishtp_cl_driver *driver = to_ishtp_cl_driver(drv);
 
-	return guid_equal(&driver->id[0].guid,
-			  &device->fw_client->props.protocol_name);
+	return(device->fw_client ? guid_equal(&driver->id[0].guid,
+	       &device->fw_client->props.protocol_name) : 0);
 }
 
 /**
@@ -472,7 +472,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
 		if (etm4x_sspcicrn_present(drvdata, i))
 			etm4x_relaxed_write32(csa, config->ss_pe_cmp[i], TRCSSPCICRn(i));
 	}
-	for (i = 0; i < drvdata->nr_addr_cmp; i++) {
+	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
 		etm4x_relaxed_write64(csa, config->addr_val[i], TRCACVRn(i));
 		etm4x_relaxed_write64(csa, config->addr_acc[i], TRCACATRn(i));
 	}

@@ -1070,25 +1070,21 @@ static bool etm4_init_iomem_access(struct etmv4_drvdata *drvdata,
 				   struct csdev_access *csa)
 {
 	u32 devarch = readl_relaxed(drvdata->base + TRCDEVARCH);
-	u32 idr1 = readl_relaxed(drvdata->base + TRCIDR1);
 
 	/*
 	 * All ETMs must implement TRCDEVARCH to indicate that
-	 * the component is an ETMv4. To support any broken
-	 * implementations we fall back to TRCIDR1 check, which
-	 * is not really reliable.
+	 * the component is an ETMv4. Even though TRCIDR1 also
+	 * contains the information, it is part of the "Trace"
+	 * register and must be accessed with the OSLK cleared,
+	 * with MMIO. But we cannot touch the OSLK until we are
+	 * sure this is an ETM. So rely only on the TRCDEVARCH.
 	 */
-	if ((devarch & ETM_DEVARCH_ID_MASK) == ETM_DEVARCH_ETMv4x_ARCH) {
-		drvdata->arch = etm_devarch_to_arch(devarch);
-	} else {
-		pr_warn("CPU%d: ETM4x incompatible TRCDEVARCH: %x, falling back to TRCIDR1\n",
-			smp_processor_id(), devarch);
-
-		if (ETM_TRCIDR1_ARCH_MAJOR(idr1) != ETM_TRCIDR1_ARCH_ETMv4)
-			return false;
-		drvdata->arch = etm_trcidr_to_arch(idr1);
+	if ((devarch & ETM_DEVARCH_ID_MASK) != ETM_DEVARCH_ETMv4x_ARCH) {
+		pr_warn_once("TRCDEVARCH doesn't match ETMv4 architecture\n");
+		return false;
 	}
 
+	drvdata->arch = etm_devarch_to_arch(devarch);
 	*csa = CSDEV_ACCESS_IOMEM(drvdata->base);
 	return true;
 }

@@ -753,14 +753,12 @@
  * TRCDEVARCH	- CoreSight architected register
  *                - Bits[15:12] - Major version
  *                - Bits[19:16] - Minor version
- * TRCIDR1	- ETM architected register
- *                - Bits[11:8] - Major version
- *                - Bits[7:4]  - Minor version
- * We must rely on TRCDEVARCH for the version information,
- * however we don't want to break the support for potential
- * old implementations which might not implement it. Thus
- * we fall back to TRCIDR1 if TRCDEVARCH is not implemented
- * for memory mapped components.
+ *
+ * We must rely only on TRCDEVARCH for the version information. Even though,
+ * TRCIDR1 also provides the architecture version, it is a "Trace" register
+ * and as such must be accessed only with Trace power domain ON. This may
+ * not be available at probe time.
  *
  * Now to make certain decisions easier based on the version
  * we use an internal representation of the version in the
  * driver, as follows :

@@ -786,12 +784,6 @@ static inline u8 etm_devarch_to_arch(u32 devarch)
 				ETM_DEVARCH_REVISION(devarch));
 }
 
-static inline u8 etm_trcidr_to_arch(u32 trcidr1)
-{
-	return ETM_ARCH_VERSION(ETM_TRCIDR1_ARCH_MAJOR(trcidr1),
-				ETM_TRCIDR1_ARCH_MINOR(trcidr1));
-}
-
 enum etm_impdef_type {
 	ETM4_IMPDEF_HISI_CORE_COMMIT,
 	ETM4_IMPDEF_FEATURE_MAX,
@@ -178,6 +178,11 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
 			return NOTIFY_OK;
 		}
 
+		/*
+		 * Clear the flag before adding the device so that fw_devlink
+		 * doesn't skip adding consumers to this device.
+		 */
+		rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
 		client = of_i2c_register_device(adap, rd->dn);
 		if (IS_ERR(client)) {
 			dev_err(&adap->dev, "failed to create client for '%pOF'\n",
@@ -864,7 +864,7 @@ static irqreturn_t kx022a_trigger_handler(int irq, void *p)
 	if (ret < 0)
 		goto err_read;
 
-	iio_push_to_buffers_with_timestamp(idev, data->buffer, pf->timestamp);
+	iio_push_to_buffers_with_timestamp(idev, data->buffer, data->timestamp);
 err_read:
 	iio_trigger_notify_done(idev->trig);

@@ -253,7 +253,7 @@ static const struct ad_sigma_delta_info ad7791_sigma_delta_info = {
 	.has_registers = true,
 	.addr_shift = 4,
 	.read_mask = BIT(3),
-	.irq_flags = IRQF_TRIGGER_LOW,
+	.irq_flags = IRQF_TRIGGER_FALLING,
 };
 
 static int ad7791_read_raw(struct iio_dev *indio_dev,

@@ -28,7 +28,6 @@ struct ltc2497_driverdata {
 	struct ltc2497core_driverdata common_ddata;
 	struct i2c_client *client;
 	u32 recv_size;
-	u32 sub_lsb;
 	/*
 	 * DMA (thus cache coherency maintenance) may require the
 	 * transfer buffers to live in their own cache lines.

@@ -65,10 +64,10 @@ static int ltc2497_result_and_measure(struct ltc2497core_driverdata *ddata,
 		 * equivalent to a sign extension.
 		 */
 		if (st->recv_size == 3) {
-			*val = (get_unaligned_be24(st->data.d8) >> st->sub_lsb)
+			*val = (get_unaligned_be24(st->data.d8) >> 6)
 				- BIT(ddata->chip_info->resolution + 1);
 		} else {
-			*val = (be32_to_cpu(st->data.d32) >> st->sub_lsb)
+			*val = (be32_to_cpu(st->data.d32) >> 6)
 				- BIT(ddata->chip_info->resolution + 1);
 		}

@@ -122,7 +121,6 @@ static int ltc2497_probe(struct i2c_client *client)
 	st->common_ddata.chip_info = chip_info;
 
 	resolution = chip_info->resolution;
-	st->sub_lsb = 31 - (resolution + 1);
 	st->recv_size = BITS_TO_BYTES(resolution) + 1;
 
 	return ltc2497core_probe(dev, indio_dev);
@@ -414,13 +414,17 @@ static int max11410_sample(struct max11410_state *st, int *sample_raw,
 		if (!ret)
 			return -ETIMEDOUT;
 	} else {
+		int ret2;
+
 		/* Wait for status register Conversion Ready flag */
-		ret = read_poll_timeout(max11410_read_reg, ret,
-					ret || (val & MAX11410_STATUS_CONV_READY_BIT),
+		ret = read_poll_timeout(max11410_read_reg, ret2,
+					ret2 || (val & MAX11410_STATUS_CONV_READY_BIT),
 					5000, MAX11410_CONVERSION_TIMEOUT_MS * 1000,
 					true, st, MAX11410_REG_STATUS, &val);
 		if (ret)
 			return ret;
+		if (ret2)
+			return ret2;
 	}
 
 	/* Read ADC Data */

@@ -851,17 +855,21 @@ static int max11410_init_vref(struct device *dev,
 
 static int max11410_calibrate(struct max11410_state *st, u32 cal_type)
 {
-	int ret, val;
+	int ret, ret2, val;
 
 	ret = max11410_write_reg(st, MAX11410_REG_CAL_START, cal_type);
 	if (ret)
 		return ret;
 
 	/* Wait for status register Calibration Ready flag */
-	return read_poll_timeout(max11410_read_reg, ret,
-				 ret || (val & MAX11410_STATUS_CAL_READY_BIT),
-				 50000, MAX11410_CALIB_TIMEOUT_MS * 1000, true,
-				 st, MAX11410_REG_STATUS, &val);
+	ret = read_poll_timeout(max11410_read_reg, ret2,
+				ret2 || (val & MAX11410_STATUS_CAL_READY_BIT),
+				50000, MAX11410_CALIB_TIMEOUT_MS * 1000, true,
+				st, MAX11410_REG_STATUS, &val);
+	if (ret)
+		return ret;
+
+	return ret2;
 }
 
 static int max11410_self_calibrate(struct max11410_state *st)

@@ -639,7 +639,7 @@ out:
 
 static int palmas_gpadc_remove(struct platform_device *pdev)
 {
-	struct iio_dev *indio_dev = dev_to_iio_dev(&pdev->dev);
+	struct iio_dev *indio_dev = dev_get_drvdata(&pdev->dev);
 	struct palmas_gpadc *adc = iio_priv(indio_dev);
 
 	if (adc->wakeup1_enable || adc->wakeup2_enable)
@@ -628,12 +628,20 @@ static int adc5_get_fw_channel_data(struct adc5_chip *adc,
 				    struct fwnode_handle *fwnode,
 				    const struct adc5_data *data)
 {
-	const char *name = fwnode_get_name(fwnode), *channel_name;
+	const char *channel_name;
+	char *name;
 	u32 chan, value, varr[2];
 	u32 sid = 0;
 	int ret;
 	struct device *dev = adc->dev;
 
+	name = devm_kasprintf(dev, GFP_KERNEL, "%pfwP", fwnode);
+	if (!name)
+		return -ENOMEM;
+
+	/* Cut the address part */
+	name[strchrnul(name, '@') - name] = '\0';
+
 	ret = fwnode_property_read_u32(fwnode, "reg", &chan);
 	if (ret) {
 		dev_err(dev, "invalid channel number %s\n", name);

@@ -634,6 +634,7 @@ static int ti_ads7950_probe(struct spi_device *spi)
 	st->chip.label = dev_name(&st->spi->dev);
 	st->chip.parent = &st->spi->dev;
 	st->chip.owner = THIS_MODULE;
+	st->chip.can_sleep = true;
 	st->chip.base = -1;
 	st->chip.ngpio = TI_ADS7950_NUM_GPIOS;
 	st->chip.get_direction = ti_ads7950_get_direction;

@@ -66,8 +66,8 @@ static int cio_dac_write_raw(struct iio_dev *indio_dev,
 	if (mask != IIO_CHAN_INFO_RAW)
 		return -EINVAL;
 
-	/* DAC can only accept up to a 16-bit value */
-	if ((unsigned int)val > 65535)
+	/* DAC can only accept up to a 12-bit value */
+	if ((unsigned int)val > 4095)
 		return -EINVAL;
 
 	priv->chan_out_states[chan->channel] = val;

@@ -47,6 +47,7 @@ config ADIS16480
 	depends on SPI
 	select IIO_ADIS_LIB
 	select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
+	select CRC32
 	help
 	  Say yes here to build support for Analog Devices ADIS16375, ADIS16480,
 	  ADIS16485, ADIS16488 inertial sensors.
@@ -203,24 +203,27 @@ static ssize_t iio_buffer_write(struct file *filp, const char __user *buf,
 				break;
 			}
 
+			if (filp->f_flags & O_NONBLOCK) {
+				if (!written)
+					ret = -EAGAIN;
+				break;
+			}
+
 			wait_woken(&wait, TASK_INTERRUPTIBLE,
 					MAX_SCHEDULE_TIMEOUT);
 			continue;
 		}
 
 		ret = rb->access->write(rb, n - written, buf + written);
-		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
-			ret = -EAGAIN;
+		if (ret < 0)
+			break;
 
-		if (ret > 0) {
-			written += ret;
-			if (written != n && !(filp->f_flags & O_NONBLOCK))
-				continue;
-		}
-	} while (ret == 0);
+		written += ret;
+
+	} while (written != n);
 	remove_wait_queue(&rb->pollq, &wait);
 
-	return ret < 0 ? ret : n;
+	return ret < 0 ? ret : written;
 }
 
 /**
@@ -429,6 +429,14 @@ static const struct iio_info cm32181_info = {
 	.attrs = &cm32181_attribute_group,
 };
 
+static void cm32181_unregister_dummy_client(void *data)
+{
+	struct i2c_client *client = data;
+
+	/* Unregister the dummy client */
+	i2c_unregister_device(client);
+}
+
 static int cm32181_probe(struct i2c_client *client)
 {
 	struct device *dev = &client->dev;

@@ -460,6 +468,10 @@ static int cm32181_probe(struct i2c_client *client)
 		client = i2c_acpi_new_device(dev, 1, &board_info);
 		if (IS_ERR(client))
 			return PTR_ERR(client);
+
+		ret = devm_add_action_or_reset(dev, cm32181_unregister_dummy_client, client);
+		if (ret)
+			return ret;
 	}
 
 	cm32181 = iio_priv(indio_dev);

@@ -208,7 +208,6 @@ static int vcnl4000_init(struct vcnl4000_data *data)
 
 	data->rev = ret & 0xf;
 	data->al_scale = 250000;
-	mutex_init(&data->vcnl4000_lock);
 
 	return data->chip_spec->set_power_state(data, true);
 };

@@ -1367,6 +1366,8 @@ static int vcnl4000_probe(struct i2c_client *client)
 	data->id = id->driver_data;
 	data->chip_spec = &vcnl4000_chip_spec_cfg[data->id];
 
+	mutex_init(&data->vcnl4000_lock);
+
 	ret = data->chip_spec->init(data);
 	if (ret < 0)
 		return ret;
@@ -153,7 +153,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
 				mtdblk->cache_state = STATE_EMPTY;
 				ret = mtd_read(mtd, sect_start, sect_size,
 					       &retlen, mtdblk->cache_data);
-				if (ret)
+				if (ret && !mtd_is_bitflip(ret))
 					return ret;
 				if (retlen != sect_size)
 					return -EIO;

@@ -188,8 +188,12 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
 	pr_debug("mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
 			mtd->name, pos, len);
 
-	if (!sect_size)
-		return mtd_read(mtd, pos, len, &retlen, buf);
+	if (!sect_size) {
+		ret = mtd_read(mtd, pos, len, &retlen, buf);
+		if (ret && !mtd_is_bitflip(ret))
+			return ret;
+		return 0;
+	}
 
 	while (len > 0) {
 		unsigned long sect_start = (pos/sect_size)*sect_size;

@@ -209,7 +213,7 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
 			memcpy (buf, mtdblk->cache_data + offset, size);
 		} else {
 			ret = mtd_read(mtd, pos, size, &retlen, buf);
-			if (ret)
+			if (ret && !mtd_is_bitflip(ret))
 				return ret;
 			if (retlen != size)
 				return -EIO;

@@ -280,7 +280,7 @@ static void meson_nfc_cmd_access(struct nand_chip *nand, int raw, bool dir,
 
 	if (raw) {
 		len = mtd->writesize + mtd->oobsize;
-		cmd = (len & GENMASK(5, 0)) | scrambler | DMA_DIR(dir);
+		cmd = (len & GENMASK(13, 0)) | scrambler | DMA_DIR(dir);
 		writel(cmd, nfc->reg_base + NFC_REG_CMD);
 		return;
 	}

@@ -544,7 +544,7 @@ static int meson_nfc_read_buf(struct nand_chip *nand, u8 *buf, int len)
 	if (ret)
 		goto out;
 
-	cmd = NFC_CMD_N2M | (len & GENMASK(5, 0));
+	cmd = NFC_CMD_N2M | (len & GENMASK(13, 0));
 	writel(cmd, nfc->reg_base + NFC_REG_CMD);
 
 	meson_nfc_drain_cmd(nfc);

@@ -568,7 +568,7 @@ static int meson_nfc_write_buf(struct nand_chip *nand, u8 *buf, int len)
 	if (ret)
 		return ret;
 
-	cmd = NFC_CMD_M2N | (len & GENMASK(5, 0));
+	cmd = NFC_CMD_M2N | (len & GENMASK(13, 0));
 	writel(cmd, nfc->reg_base + NFC_REG_CMD);
 
 	meson_nfc_drain_cmd(nfc);

@@ -1531,6 +1531,9 @@ static int stm32_fmc2_nfc_setup_interface(struct nand_chip *chip, int chipnr,
 	if (IS_ERR(sdrt))
 		return PTR_ERR(sdrt);
 
+	if (conf->timings.mode > 3)
+		return -EOPNOTSUPP;
+
 	if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
 		return 0;
@@ -3269,7 +3269,8 @@ static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
 
 	combined = skb_header_pointer(skb, 0, sizeof(_combined), &_combined);
 	if (!combined || combined->ip6.nexthdr != NEXTHDR_ICMP ||
-	    combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT)
+	    (combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION &&
+	     combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT))
 		goto out;
 
 	saddr = &combined->ip6.saddr;

@@ -3291,7 +3292,7 @@ static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
 	else if (curr_active_slave &&
 		 time_after(slave_last_rx(bond, curr_active_slave),
 			    curr_active_slave->last_link_up))
-		bond_validate_na(bond, slave, saddr, daddr);
+		bond_validate_na(bond, slave, daddr, saddr);
 	else if (curr_arp_slave &&
 		 bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
 		bond_validate_na(bond, slave, saddr, daddr);

@@ -1063,6 +1063,10 @@ static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
 	}
 #endif
 	addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
+#ifdef CONFIG_MACB_USE_HWSTAMP
+	if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
+		addr &= ~GEM_BIT(DMA_RXVALID);
+#endif
 	return addr;
 }
@@ -989,6 +989,20 @@ static int enetc_get_mm(struct net_device *ndev, struct ethtool_mm_state *state)
 	return 0;
 }
 
+/* FIXME: Workaround for the link partner's verification failing if ENETC
+ * priorly received too much express traffic. The documentation doesn't
+ * suggest this is needed.
+ */
+static void enetc_restart_emac_rx(struct enetc_si *si)
+{
+	u32 val = enetc_port_rd(&si->hw, ENETC_PM0_CMD_CFG);
+
+	enetc_port_wr(&si->hw, ENETC_PM0_CMD_CFG, val & ~ENETC_PM0_RX_EN);
+
+	if (val & ENETC_PM0_RX_EN)
+		enetc_port_wr(&si->hw, ENETC_PM0_CMD_CFG, val);
+}
+
 static int enetc_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
 			struct netlink_ext_ack *extack)
 {

@@ -1040,6 +1054,8 @@ static int enetc_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
 
 	enetc_port_wr(hw, ENETC_MMCSR, val);
 
+	enetc_restart_emac_rx(priv->si);
+
 	mutex_unlock(&priv->mm_lock);
 
 	return 0;
@ -58,8 +58,6 @@ enum iavf_vsi_state_t {
|
|||
struct iavf_vsi {
|
||||
struct iavf_adapter *back;
|
||||
struct net_device *netdev;
|
||||
unsigned long active_cvlans[BITS_TO_LONGS(VLAN_N_VID)];
|
||||
unsigned long active_svlans[BITS_TO_LONGS(VLAN_N_VID)];
|
||||
u16 seid;
|
||||
u16 id;
|
||||
DECLARE_BITMAP(state, __IAVF_VSI_STATE_SIZE__);
|
||||
|
@ -157,15 +155,20 @@ struct iavf_vlan {
|
|||
u16 tpid;
|
||||
};
|
||||
|
||||
enum iavf_vlan_state_t {
|
||||
IAVF_VLAN_INVALID,
|
||||
IAVF_VLAN_ADD, /* filter needs to be added */
|
||||
IAVF_VLAN_IS_NEW, /* filter is new, wait for PF answer */
|
||||
IAVF_VLAN_ACTIVE, /* filter is accepted by PF */
|
||||
IAVF_VLAN_DISABLE, /* filter needs to be deleted by PF, then marked INACTIVE */
|
||||
IAVF_VLAN_INACTIVE, /* filter is inactive, we are in IFF_DOWN */
|
||||
IAVF_VLAN_REMOVE, /* filter needs to be removed from list */
|
||||
};
|
||||
|
||||
struct iavf_vlan_filter {
|
||||
struct list_head list;
|
||||
struct iavf_vlan vlan;
|
||||
struct {
|
||||
u8 is_new_vlan:1; /* filter is new, wait for PF answer */
|
||||
u8 remove:1; /* filter needs to be removed */
|
||||
u8 add:1; /* filter needs to be added */
|
||||
u8 padding:5;
|
||||
};
|
||||
enum iavf_vlan_state_t state;
|
||||
};
|
||||
|
||||
#define IAVF_MAX_TRAFFIC_CLASS 4
|
||||
|
@ -257,6 +260,7 @@ struct iavf_adapter {
|
|||
wait_queue_head_t vc_waitqueue;
|
||||
struct iavf_q_vector *q_vectors;
|
||||
struct list_head vlan_filter_list;
|
||||
int num_vlan_filters;
|
||||
struct list_head mac_filter_list;
|
||||
struct mutex crit_lock;
|
||||
struct mutex client_lock;
|
||||
|
|
|
@@ -791,7 +791,8 @@ iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
f->vlan = vlan;

list_add_tail(&f->list, &adapter->vlan_filter_list);
f->add = true;
f->state = IAVF_VLAN_ADD;
adapter->num_vlan_filters++;
adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
}

@@ -813,7 +814,7 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)

f = iavf_find_vlan(adapter, vlan);
if (f) {
f->remove = true;
f->state = IAVF_VLAN_REMOVE;
adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
}

@@ -828,14 +829,18 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
**/
static void iavf_restore_filters(struct iavf_adapter *adapter)
{
u16 vid;
struct iavf_vlan_filter *f;

/* re-add all VLAN filters */
for_each_set_bit(vid, adapter->vsi.active_cvlans, VLAN_N_VID)
iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021Q));
spin_lock_bh(&adapter->mac_vlan_list_lock);

for_each_set_bit(vid, adapter->vsi.active_svlans, VLAN_N_VID)
iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021AD));
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
if (f->state == IAVF_VLAN_INACTIVE)
f->state = IAVF_VLAN_ADD;
}

spin_unlock_bh(&adapter->mac_vlan_list_lock);
adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
}

/**

@@ -844,8 +849,7 @@ static void iavf_restore_filters(struct iavf_adapter *adapter)
*/
u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
{
return bitmap_weight(adapter->vsi.active_cvlans, VLAN_N_VID) +
bitmap_weight(adapter->vsi.active_svlans, VLAN_N_VID);
return adapter->num_vlan_filters;
}

/**

@@ -928,11 +932,6 @@ static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
return 0;

iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
if (proto == cpu_to_be16(ETH_P_8021Q))
clear_bit(vid, adapter->vsi.active_cvlans);
else
clear_bit(vid, adapter->vsi.active_svlans);

return 0;
}

@@ -1293,16 +1292,11 @@ static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter)
}
}

/* remove all VLAN filters */
/* disable all VLAN filters */
list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
list) {
if (vlf->add) {
list_del(&vlf->list);
kfree(vlf);
} else {
vlf->remove = true;
}
}
list)
vlf->state = IAVF_VLAN_DISABLE;

spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

@@ -2914,6 +2908,7 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
list_del(&fv->list);
kfree(fv);
}
adapter->num_vlan_filters = 0;

spin_unlock_bh(&adapter->mac_vlan_list_lock);

@@ -3131,9 +3126,6 @@ continue_reset:
adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
iavf_misc_irq_enable(adapter);

bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID);
bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID);

mod_delayed_work(adapter->wq, &adapter->watchdog_task, 2);

/* We were running when the reset started, so we need to restore some
@@ -642,16 +642,10 @@ static void iavf_vlan_add_reject(struct iavf_adapter *adapter)

spin_lock_bh(&adapter->mac_vlan_list_lock);
list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
if (f->is_new_vlan) {
if (f->vlan.tpid == ETH_P_8021Q)
clear_bit(f->vlan.vid,
adapter->vsi.active_cvlans);
else
clear_bit(f->vlan.vid,
adapter->vsi.active_svlans);

if (f->state == IAVF_VLAN_IS_NEW) {
list_del(&f->list);
kfree(f);
adapter->num_vlan_filters--;
}
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);

@@ -679,7 +673,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
spin_lock_bh(&adapter->mac_vlan_list_lock);

list_for_each_entry(f, &adapter->vlan_filter_list, list) {
if (f->add)
if (f->state == IAVF_VLAN_ADD)
count++;
}
if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {

@@ -710,11 +704,10 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
vvfl->vsi_id = adapter->vsi_res->vsi_id;
vvfl->num_elements = count;
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
if (f->add) {
if (f->state == IAVF_VLAN_ADD) {
vvfl->vlan_id[i] = f->vlan.vid;
i++;
f->add = false;
f->is_new_vlan = true;
f->state = IAVF_VLAN_IS_NEW;
if (i == count)
break;
}

@@ -760,7 +753,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
vvfl_v2->num_elements = count;
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
if (f->add) {
if (f->state == IAVF_VLAN_ADD) {
struct virtchnl_vlan_supported_caps *filtering_support =
&adapter->vlan_v2_caps.filtering.filtering_support;
struct virtchnl_vlan *vlan;

@@ -778,8 +771,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
vlan->tpid = f->vlan.tpid;

i++;
f->add = false;
f->is_new_vlan = true;
f->state = IAVF_VLAN_IS_NEW;
}
}

@@ -822,10 +814,16 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
* filters marked for removal to enable bailing out before
* sending a virtchnl message
*/
if (f->remove && !VLAN_FILTERING_ALLOWED(adapter)) {
if (f->state == IAVF_VLAN_REMOVE &&
!VLAN_FILTERING_ALLOWED(adapter)) {
list_del(&f->list);
kfree(f);
} else if (f->remove) {
adapter->num_vlan_filters--;
} else if (f->state == IAVF_VLAN_DISABLE &&
!VLAN_FILTERING_ALLOWED(adapter)) {
f->state = IAVF_VLAN_INACTIVE;
} else if (f->state == IAVF_VLAN_REMOVE ||
f->state == IAVF_VLAN_DISABLE) {
count++;
}
}

@@ -857,11 +855,18 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
vvfl->vsi_id = adapter->vsi_res->vsi_id;
vvfl->num_elements = count;
list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
if (f->remove) {
if (f->state == IAVF_VLAN_DISABLE) {
vvfl->vlan_id[i] = f->vlan.vid;
f->state = IAVF_VLAN_INACTIVE;
i++;
if (i == count)
break;
} else if (f->state == IAVF_VLAN_REMOVE) {
vvfl->vlan_id[i] = f->vlan.vid;
list_del(&f->list);
kfree(f);
adapter->num_vlan_filters--;
i++;
if (i == count)
break;
}

@@ -901,7 +906,8 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
vvfl_v2->num_elements = count;
list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
if (f->remove) {
if (f->state == IAVF_VLAN_DISABLE ||
f->state == IAVF_VLAN_REMOVE) {
struct virtchnl_vlan_supported_caps *filtering_support =
&adapter->vlan_v2_caps.filtering.filtering_support;
struct virtchnl_vlan *vlan;

@@ -915,8 +921,13 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
vlan->tci = f->vlan.vid;
vlan->tpid = f->vlan.tpid;

list_del(&f->list);
kfree(f);
if (f->state == IAVF_VLAN_DISABLE) {
f->state = IAVF_VLAN_INACTIVE;
} else {
list_del(&f->list);
kfree(f);
adapter->num_vlan_filters--;
}
i++;
if (i == count)
break;

@@ -2192,7 +2203,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
list_for_each_entry(vlf,
&adapter->vlan_filter_list,
list)
vlf->add = true;
vlf->state = IAVF_VLAN_ADD;

adapter->aq_required |=
IAVF_FLAG_AQ_ADD_VLAN_FILTER;

@@ -2260,7 +2271,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
list_for_each_entry(vlf,
&adapter->vlan_filter_list,
list)
vlf->add = true;
vlf->state = IAVF_VLAN_ADD;

aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
}

@@ -2444,15 +2455,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,

spin_lock_bh(&adapter->mac_vlan_list_lock);
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
if (f->is_new_vlan) {
f->is_new_vlan = false;
if (f->vlan.tpid == ETH_P_8021Q)
set_bit(f->vlan.vid,
adapter->vsi.active_cvlans);
else
set_bit(f->vlan.vid,
adapter->vsi.active_svlans);
}
if (f->state == IAVF_VLAN_IS_NEW)
f->state = IAVF_VLAN_ACTIVE;
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
@@ -681,14 +681,32 @@ int mlx4_en_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
return 0;
}

int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
enum xdp_rss_hash_type *rss_type)
{
struct mlx4_en_xdp_buff *_ctx = (void *)ctx;
struct mlx4_cqe *cqe = _ctx->cqe;
enum xdp_rss_hash_type xht = 0;
__be16 status;

if (unlikely(!(_ctx->dev->features & NETIF_F_RXHASH)))
return -ENODATA;

*hash = be32_to_cpu(_ctx->cqe->immed_rss_invalid);
*hash = be32_to_cpu(cqe->immed_rss_invalid);
status = cqe->status;
if (status & cpu_to_be16(MLX4_CQE_STATUS_TCP))
xht = XDP_RSS_L4_TCP;
if (status & cpu_to_be16(MLX4_CQE_STATUS_UDP))
xht = XDP_RSS_L4_UDP;
if (status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 | MLX4_CQE_STATUS_IPV4F))
xht |= XDP_RSS_L3_IPV4;
if (status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) {
xht |= XDP_RSS_L3_IPV6;
if (cqe->ipv6_ext_mask)
xht |= XDP_RSS_L3_DYNHDR;
}
*rss_type = xht;

return 0;
}
@@ -798,7 +798,8 @@ int mlx4_en_netdev_event(struct notifier_block *this,

struct xdp_md;
int mlx4_en_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp);
int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash);
int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
enum xdp_rss_hash_type *rss_type);

/*
* Functions for time stamping
@@ -34,6 +34,7 @@
#include <net/xdp_sock_drv.h>
#include "en/xdp.h"
#include "en/params.h"
#include <linux/bitfield.h>

int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk)
{

@@ -169,14 +170,72 @@ static int mlx5e_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
return 0;
}

static int mlx5e_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
/* Mapping HW RSS Type bits CQE_RSS_HTYPE_IP + CQE_RSS_HTYPE_L4 into 4-bits*/
#define RSS_TYPE_MAX_TABLE 16 /* 4-bits max 16 entries */
#define RSS_L4 GENMASK(1, 0)
#define RSS_L3 GENMASK(3, 2) /* Same as CQE_RSS_HTYPE_IP */

/* Valid combinations of CQE_RSS_HTYPE_IP + CQE_RSS_HTYPE_L4 sorted numerical */
enum mlx5_rss_hash_type {
RSS_TYPE_NO_HASH = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IP_NONE) |
FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_NONE)),
RSS_TYPE_L3_IPV4 = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV4) |
FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_NONE)),
RSS_TYPE_L4_IPV4_TCP = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV4) |
FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_TCP)),
RSS_TYPE_L4_IPV4_UDP = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV4) |
FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_UDP)),
RSS_TYPE_L4_IPV4_IPSEC = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV4) |
FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_IPSEC)),
RSS_TYPE_L3_IPV6 = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV6) |
FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_NONE)),
RSS_TYPE_L4_IPV6_TCP = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV6) |
FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_TCP)),
RSS_TYPE_L4_IPV6_UDP = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV6) |
FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_UDP)),
RSS_TYPE_L4_IPV6_IPSEC = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV6) |
FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_IPSEC)),
};

/* Invalid combinations will simply return zero, allows no boundary checks */
static const enum xdp_rss_hash_type mlx5_xdp_rss_type[RSS_TYPE_MAX_TABLE] = {
[RSS_TYPE_NO_HASH] = XDP_RSS_TYPE_NONE,
[1] = XDP_RSS_TYPE_NONE, /* Implicit zero */
[2] = XDP_RSS_TYPE_NONE, /* Implicit zero */
[3] = XDP_RSS_TYPE_NONE, /* Implicit zero */
[RSS_TYPE_L3_IPV4] = XDP_RSS_TYPE_L3_IPV4,
[RSS_TYPE_L4_IPV4_TCP] = XDP_RSS_TYPE_L4_IPV4_TCP,
[RSS_TYPE_L4_IPV4_UDP] = XDP_RSS_TYPE_L4_IPV4_UDP,
[RSS_TYPE_L4_IPV4_IPSEC] = XDP_RSS_TYPE_L4_IPV4_IPSEC,
[RSS_TYPE_L3_IPV6] = XDP_RSS_TYPE_L3_IPV6,
[RSS_TYPE_L4_IPV6_TCP] = XDP_RSS_TYPE_L4_IPV6_TCP,
[RSS_TYPE_L4_IPV6_UDP] = XDP_RSS_TYPE_L4_IPV6_UDP,
[RSS_TYPE_L4_IPV6_IPSEC] = XDP_RSS_TYPE_L4_IPV6_IPSEC,
[12] = XDP_RSS_TYPE_NONE, /* Implicit zero */
[13] = XDP_RSS_TYPE_NONE, /* Implicit zero */
[14] = XDP_RSS_TYPE_NONE, /* Implicit zero */
[15] = XDP_RSS_TYPE_NONE, /* Implicit zero */
};

static int mlx5e_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
enum xdp_rss_hash_type *rss_type)
{
const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
const struct mlx5_cqe64 *cqe = _ctx->cqe;
u32 hash_type, l4_type, ip_type, lookup;

if (unlikely(!(_ctx->xdp.rxq->dev->features & NETIF_F_RXHASH)))
return -ENODATA;

*hash = be32_to_cpu(_ctx->cqe->rss_hash_result);
*hash = be32_to_cpu(cqe->rss_hash_result);

hash_type = cqe->rss_hash_type;
BUILD_BUG_ON(CQE_RSS_HTYPE_IP != RSS_L3); /* same mask */
ip_type = hash_type & CQE_RSS_HTYPE_IP;
l4_type = FIELD_GET(CQE_RSS_HTYPE_L4, hash_type);
lookup = ip_type | l4_type;
*rss_type = mlx5_xdp_rss_type[lookup];

return 0;
}
@@ -628,7 +628,13 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
int i, err, ring;

if (dev->flags & QLCNIC_NEED_FLR) {
pci_reset_function(dev->pdev);
err = pci_reset_function(dev->pdev);
if (err) {
dev_err(&dev->pdev->dev,
"Adapter reset failed (%d). Please reboot\n",
err);
return err;
}
dev->flags &= ~QLCNIC_NEED_FLR;
}
@@ -4522,7 +4522,7 @@ static int niu_alloc_channels(struct niu *np)

err = niu_rbr_fill(np, rp, GFP_KERNEL);
if (err)
return err;
goto out_err;
}

tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
@@ -27,7 +27,7 @@
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/sys_soc.h>
@@ -7,6 +7,7 @@

#include <linux/io.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/irqreturn.h>

@@ -23,7 +24,7 @@
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/sys_soc.h>
@@ -191,7 +191,7 @@
#define MAX_ID_PS 2260U
#define DEFAULT_ID_PS 2000U

#define PPM_TO_SUBNS_INC(ppb) div_u64(GENMASK(31, 0) * (ppb) * \
#define PPM_TO_SUBNS_INC(ppb) div_u64(GENMASK_ULL(31, 0) * (ppb) * \
PTP_CLK_PERIOD_100BT1, NSEC_PER_SEC)

#define NXP_C45_SKB_CB(skb) ((struct nxp_c45_skb_cb *)(skb)->cb)

@@ -1337,6 +1337,17 @@ no_ptp_support:
return ret;
}

static void nxp_c45_remove(struct phy_device *phydev)
{
struct nxp_c45_phy *priv = phydev->priv;

if (priv->ptp_clock)
ptp_clock_unregister(priv->ptp_clock);

skb_queue_purge(&priv->tx_queue);
skb_queue_purge(&priv->rx_queue);
}

static struct phy_driver nxp_c45_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),

@@ -1359,6 +1370,7 @@ static struct phy_driver nxp_c45_driver[] = {
.set_loopback = genphy_c45_loopback,
.get_sqi = nxp_c45_get_sqi,
.get_sqi_max = nxp_c45_get_sqi_max,
.remove = nxp_c45_remove,
},
};
@@ -210,6 +210,12 @@ static const enum gpiod_flags gpio_flags[] = {
#define SFP_PHY_ADDR 22
#define SFP_PHY_ADDR_ROLLBALL 17

/* SFP_EEPROM_BLOCK_SIZE is the size of data chunk to read the EEPROM
* at a time. Some SFP modules and also some Linux I2C drivers do not like
* reads longer than 16 bytes.
*/
#define SFP_EEPROM_BLOCK_SIZE 16

struct sff_data {
unsigned int gpios;
bool (*module_supported)(const struct sfp_eeprom_id *id);

@@ -1951,11 +1957,7 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
u8 check;
int ret;

/* Some SFP modules and also some Linux I2C drivers do not like reads
* longer than 16 bytes, so read the EEPROM in chunks of 16 bytes at
* a time.
*/
sfp->i2c_block_size = 16;
sfp->i2c_block_size = SFP_EEPROM_BLOCK_SIZE;

ret = sfp_read(sfp, false, 0, &id.base, sizeof(id.base));
if (ret < 0) {

@@ -2513,6 +2515,9 @@ static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee,
unsigned int first, last, len;
int ret;

if (!(sfp->state & SFP_F_PRESENT))
return -ENODEV;

if (ee->len == 0)
return -EINVAL;

@@ -2545,6 +2550,9 @@ static int sfp_module_eeprom_by_page(struct sfp *sfp,
const struct ethtool_module_eeprom *page,
struct netlink_ext_ack *extack)
{
if (!(sfp->state & SFP_F_PRESENT))
return -ENODEV;

if (page->bank) {
NL_SET_ERR_MSG(extack, "Banks not supported");
return -EOPNOTSUPP;

@@ -2649,6 +2657,7 @@ static struct sfp *sfp_alloc(struct device *dev)
return ERR_PTR(-ENOMEM);

sfp->dev = dev;
sfp->i2c_block_size = SFP_EEPROM_BLOCK_SIZE;

mutex_init(&sfp->sm_mutex);
mutex_init(&sfp->st_mutex);
@@ -1943,7 +1943,7 @@ static struct rx_agg *alloc_rx_agg(struct r8152 *tp, gfp_t mflags)
if (!rx_agg)
return NULL;

rx_agg->page = alloc_pages(mflags | __GFP_COMP, order);
rx_agg->page = alloc_pages(mflags | __GFP_COMP | __GFP_NOWARN, order);
if (!rx_agg->page)
goto free_rx;
@@ -1648,14 +1648,18 @@ static int veth_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
return 0;
}

static int veth_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
static int veth_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
enum xdp_rss_hash_type *rss_type)
{
struct veth_xdp_buff *_ctx = (void *)ctx;
struct sk_buff *skb = _ctx->skb;

if (!_ctx->skb)
if (!skb)
return -ENODATA;

*hash = skb_get_hash(_ctx->skb);
*hash = skb_get_hash(skb);
*rss_type = skb->l4_hash ? XDP_RSS_TYPE_L4_ANY : XDP_RSS_TYPE_NONE;

return 0;
}
@@ -295,7 +295,7 @@ static int ipc_pcie_probe(struct pci_dev *pci,
ret = dma_set_mask(ipc_pcie->dev, DMA_BIT_MASK(64));
if (ret) {
dev_err(ipc_pcie->dev, "Could not set PCI DMA mask: %d", ret);
return ret;
goto set_mask_fail;
}

ipc_pcie_config_aspm(ipc_pcie);

@@ -323,6 +323,7 @@ static int ipc_pcie_probe(struct pci_dev *pci,
imem_init_fail:
ipc_pcie_resources_release(ipc_pcie);
resources_req_fail:
set_mask_fail:
pci_disable_device(pci);
pci_enable_fail:
kfree(ipc_pcie);
@@ -1674,6 +1674,9 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
struct request_queue *queue = disk->queue;
u32 size = queue_logical_block_size(queue);

if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX))
ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl);

if (ctrl->max_discard_sectors == 0) {
blk_queue_max_discard_sectors(queue, 0);
return;

@@ -1688,9 +1691,6 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
if (queue->limits.max_discard_sectors)
return;

if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX))
ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl);

blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors);
blk_queue_max_discard_segments(queue, ctrl->max_discard_segments);
@@ -226,6 +226,7 @@ static void __of_attach_node(struct device_node *np)
np->sibling = np->parent->child;
np->parent->child = np;
of_node_clear_flag(np, OF_DETACHED);
np->fwnode.flags |= FWNODE_FLAG_NOT_DEVICE;
}

/**
@@ -737,6 +737,11 @@ static int of_platform_notify(struct notifier_block *nb,
if (of_node_check_flag(rd->dn, OF_POPULATED))
return NOTIFY_OK;

/*
* Clear the flag before adding the device so that fw_devlink
* doesn't skip adding consumers to this device.
*/
rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
/* pdev_parent may be NULL when no bus platform device */
pdev_parent = of_find_device_by_node(rd->dn->parent);
pdev = of_platform_device_create(rd->dn, NULL,
@@ -128,7 +128,7 @@ static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
return -EIO;

/* Length is 2 DW of header + length of payload in DW */
length = 2 + task->request_pl_sz / sizeof(u32);
length = 2 + task->request_pl_sz / sizeof(__le32);
if (length > PCI_DOE_MAX_LENGTH)
return -EIO;
if (length == PCI_DOE_MAX_LENGTH)

@@ -141,9 +141,9 @@ static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH,
length));
for (i = 0; i < task->request_pl_sz / sizeof(u32); i++)
for (i = 0; i < task->request_pl_sz / sizeof(__le32); i++)
pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
task->request_pl[i]);
le32_to_cpu(task->request_pl[i]));

pci_doe_write_ctrl(doe_mb, PCI_DOE_CTRL_GO);

@@ -195,11 +195,11 @@ static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *tas

/* First 2 dwords have already been read */
length -= 2;
payload_length = min(length, task->response_pl_sz / sizeof(u32));
payload_length = min(length, task->response_pl_sz / sizeof(__le32));
/* Read the rest of the response payload */
for (i = 0; i < payload_length; i++) {
pci_read_config_dword(pdev, offset + PCI_DOE_READ,
&task->response_pl[i]);
pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
task->response_pl[i] = cpu_to_le32(val);
/* Prior to the last ack, ensure Data Object Ready */
if (i == (payload_length - 1) && !pci_doe_data_obj_ready(doe_mb))
return -EIO;

@@ -217,13 +217,14 @@ static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *tas
if (FIELD_GET(PCI_DOE_STATUS_ERROR, val))
return -EIO;

return min(length, task->response_pl_sz / sizeof(u32)) * sizeof(u32);
return min(length, task->response_pl_sz / sizeof(__le32)) * sizeof(__le32);
}

static void signal_task_complete(struct pci_doe_task *task, int rv)
{
task->rv = rv;
task->complete(task);
destroy_work_on_stack(&task->work);
}

static void signal_task_abort(struct pci_doe_task *task, int rv)

@@ -317,14 +318,16 @@ static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 *index, u16 *vid,
{
u32 request_pl = FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX,
*index);
__le32 request_pl_le = cpu_to_le32(request_pl);
__le32 response_pl_le;
u32 response_pl;
DECLARE_COMPLETION_ONSTACK(c);
struct pci_doe_task task = {
.prot.vid = PCI_VENDOR_ID_PCI_SIG,
.prot.type = PCI_DOE_PROTOCOL_DISCOVERY,
.request_pl = &request_pl,
.request_pl = &request_pl_le,
.request_pl_sz = sizeof(request_pl),
.response_pl = &response_pl,
.response_pl = &response_pl_le,
.response_pl_sz = sizeof(response_pl),
.complete = pci_doe_task_complete,
.private = &c,

@@ -340,6 +343,7 @@ static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 *index, u16 *vid,
if (task.rv != sizeof(response_pl))
return -EIO;

response_pl = le32_to_cpu(response_pl_le);
*vid = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_VID, response_pl);
*protocol = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL,
response_pl);

@@ -520,6 +524,8 @@ EXPORT_SYMBOL_GPL(pci_doe_supports_prot);
* task->complete will be called when the state machine is done processing this
* task.
*
* @task must be allocated on the stack.
*
* Excess data will be discarded.
*
* RETURNS: 0 when task has been successfully queued, -ERRNO on error

@@ -533,15 +539,15 @@ int pci_doe_submit_task(struct pci_doe_mb *doe_mb, struct pci_doe_task *task)
* DOE requests must be a whole number of DW and the response needs to
* be big enough for at least 1 DW
*/
if (task->request_pl_sz % sizeof(u32) ||
task->response_pl_sz < sizeof(u32))
if (task->request_pl_sz % sizeof(__le32) ||
task->response_pl_sz < sizeof(__le32))
return -EINVAL;

if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags))
return -EIO;

task->doe_mb = doe_mb;
INIT_WORK(&task->work, doe_statemachine_work);
INIT_WORK_ONSTACK(&task->work, doe_statemachine_work);
queue_work(doe_mb->work_queue, &task->work);
return 0;
}