Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
 - keep the ZC code, drop the code related to reinit

net/bridge/netfilter/ebtables.c
 - fix build after move to net_generic

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 8203c7ce4e

 .mailmap | 7
@@ -168,6 +168,7 @@ Johan Hovold <johan@kernel.org> <jhovold@gmail.com>
Johan Hovold <johan@kernel.org> <johan@hovoldconsulting.com>
John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
John Stultz <johnstul@us.ibm.com>
Jordan Crouse <jordan@cosmicpenguin.net> <jcrouse@codeaurora.org>
<josh@joshtriplett.org> <josh@freedesktop.org>
<josh@joshtriplett.org> <josh@kernel.org>
<josh@joshtriplett.org> <josht@linux.vnet.ibm.com>

@@ -253,8 +254,14 @@ Morten Welinder <welinder@anemone.rentec.com>
Morten Welinder <welinder@darter.rentec.com>
Morten Welinder <welinder@troll.com>
Mythri P K <mythripk@ti.com>
Nadia Yvette Chambers <nyc@holomorphy.com> William Lee Irwin III <wli@holomorphy.com>
Nathan Chancellor <nathan@kernel.org> <natechancellor@gmail.com>
Nguyen Anh Quynh <aquynh@gmail.com>
Nicholas Piggin <npiggin@gmail.com> <npiggen@suse.de>
Nicholas Piggin <npiggin@gmail.com> <npiggin@kernel.dk>
Nicholas Piggin <npiggin@gmail.com> <npiggin@suse.de>
Nicholas Piggin <npiggin@gmail.com> <nickpiggin@yahoo.com.au>
Nicholas Piggin <npiggin@gmail.com> <piggin@cyberone.com.au>
Nicolas Ferre <nicolas.ferre@microchip.com> <nicolas.ferre@atmel.com>
Nicolas Pitre <nico@fluxnic.net> <nicolas.pitre@linaro.org>
Nicolas Pitre <nico@fluxnic.net> <nico@linaro.org>
@@ -1,7 +1,7 @@
What: /sys/kernel/debug/moxtet/input
Date: March 2019
KernelVersion: 5.3
Contact: Marek Behún <marek.behun@nic.cz>
Contact: Marek Behún <kabel@kernel.org>
Description: (Read) Read input from the shift registers, in hexadecimal.
Returns N+1 bytes, where N is the number of Moxtet connected
modules. The first byte is from the CPU board itself.

@@ -19,7 +19,7 @@ Description: (Read) Read input from the shift registers, in hexadecimal.
What: /sys/kernel/debug/moxtet/output
Date: March 2019
KernelVersion: 5.3
Contact: Marek Behún <marek.behun@nic.cz>
Contact: Marek Behún <kabel@kernel.org>
Description: (RW) Read last written value to the shift registers, in
hexadecimal, or write values to the shift registers, also
in hexadecimal.

@@ -1,7 +1,7 @@
What: /sys/kernel/debug/turris-mox-rwtm/do_sign
Date: Jun 2020
KernelVersion: 5.8
Contact: Marek Behún <marek.behun@nic.cz>
Contact: Marek Behún <kabel@kernel.org>
Description:

======= ===========================================================

@@ -1,17 +1,17 @@
What: /sys/bus/moxtet/devices/moxtet-<name>.<addr>/module_description
Date: March 2019
KernelVersion: 5.3
Contact: Marek Behún <marek.behun@nic.cz>
Contact: Marek Behún <kabel@kernel.org>
Description: (Read) Moxtet module description. Format: string

What: /sys/bus/moxtet/devices/moxtet-<name>.<addr>/module_id
Date: March 2019
KernelVersion: 5.3
Contact: Marek Behún <marek.behun@nic.cz>
Contact: Marek Behún <kabel@kernel.org>
Description: (Read) Moxtet module ID. Format: %x

What: /sys/bus/moxtet/devices/moxtet-<name>.<addr>/module_name
Date: March 2019
KernelVersion: 5.3
Contact: Marek Behún <marek.behun@nic.cz>
Contact: Marek Behún <kabel@kernel.org>
Description: (Read) Moxtet module name. Format: string
@@ -1,7 +1,7 @@
What: /sys/class/leds/<led>/device/brightness
Date: July 2020
KernelVersion: 5.9
Contact: Marek Behún <marek.behun@nic.cz>
Contact: Marek Behún <kabel@kernel.org>
Description: (RW) On the front panel of the Turris Omnia router there is also
a button which can be used to control the intensity of all the
LEDs at once, so that if they are too bright, user can dim them.

@@ -1,21 +1,21 @@
What: /sys/firmware/turris-mox-rwtm/board_version
Date: August 2019
KernelVersion: 5.4
Contact: Marek Behún <marek.behun@nic.cz>
Contact: Marek Behún <kabel@kernel.org>
Description: (Read) Board version burned into eFuses of this Turris Mox board.
Format: %i

What: /sys/firmware/turris-mox-rwtm/mac_address*
Date: August 2019
KernelVersion: 5.4
Contact: Marek Behún <marek.behun@nic.cz>
Contact: Marek Behún <kabel@kernel.org>
Description: (Read) MAC addresses burned into eFuses of this Turris Mox board.
Format: %pM

What: /sys/firmware/turris-mox-rwtm/pubkey
Date: August 2019
KernelVersion: 5.4
Contact: Marek Behún <marek.behun@nic.cz>
Contact: Marek Behún <kabel@kernel.org>
Description: (Read) ECDSA public key (in pubkey hex compressed form) computed
as pair to the ECDSA private key burned into eFuses of this
Turris Mox Board.

@@ -24,7 +24,7 @@ Description: (Read) ECDSA public key (in pubkey hex compressed form) computed
What: /sys/firmware/turris-mox-rwtm/ram_size
Date: August 2019
KernelVersion: 5.4
Contact: Marek Behún <marek.behun@nic.cz>
Contact: Marek Behún <kabel@kernel.org>
Description: (Read) RAM size in MiB of this Turris Mox board as was detected
during manufacturing and burned into eFuses. Can be 512 or 1024.
Format: %i

@@ -32,6 +32,6 @@ Description: (Read) RAM size in MiB of this Turris Mox board as was detected
What: /sys/firmware/turris-mox-rwtm/serial_number
Date: August 2019
KernelVersion: 5.4
Contact: Marek Behún <marek.behun@nic.cz>
Contact: Marek Behún <kabel@kernel.org>
Description: (Read) Serial number burned into eFuses of this Turris Mox device.
Format: %016X
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Bindings for GPIO bitbanged I2C

maintainers:
- Wolfram Sang <wolfram@the-dreams.de>
- Wolfram Sang <wsa@kernel.org>

allOf:
- $ref: /schemas/i2c/i2c-controller.yaml#

@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Freescale Inter IC (I2C) and High Speed Inter IC (HS-I2C) for i.MX

maintainers:
- Wolfram Sang <wolfram@the-dreams.de>
- Oleksij Rempel <o.rempel@pengutronix.de>

allOf:
- $ref: /schemas/i2c/i2c-controller.yaml#

@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: CZ.NIC's Turris Omnia LEDs driver

maintainers:
- Marek Behún <marek.behun@nic.cz>
- Marek Behún <kabel@kernel.org>

description:
This module adds support for the RGB LEDs found on the front panel of the
@@ -1857,21 +1857,6 @@ ip6frag_low_thresh - INTEGER
ip6frag_time - INTEGER
Time in seconds to keep an IPv6 fragment in memory.

IPv6 Segment Routing:

seg6_flowlabel - INTEGER
Controls the behaviour of computing the flowlabel of outer
IPv6 header in case of SR T.encaps

== =======================================================
-1 set flowlabel to zero.
0 copy flowlabel from Inner packet in case of Inner IPv6
(Set flowlabel to 0 in case IPv4/L2)
1 Compute the flowlabel using seg6_make_flowlabel()
== =======================================================

Default is 0.

``conf/default/*``:
Change the interface-specific default settings.

@@ -24,3 +24,16 @@ seg6_require_hmac - INTEGER
* 1 - Drop SR packets without HMAC, validate SR packets with HMAC

Default is 0.

seg6_flowlabel - INTEGER
Controls the behaviour of computing the flowlabel of outer
IPv6 header in case of SR T.encaps

== =======================================================
-1 set flowlabel to zero.
0 copy flowlabel from Inner packet in case of Inner IPv6
(Set flowlabel to 0 in case IPv4/L2)
1 Compute the flowlabel using seg6_make_flowlabel()
== =======================================================

Default is 0.
 MAINTAINERS | 17

@@ -1792,19 +1792,26 @@ F: drivers/net/ethernet/cortina/
F: drivers/pinctrl/pinctrl-gemini.c
F: drivers/rtc/rtc-ftrtc010.c

ARM/CZ.NIC TURRIS MOX SUPPORT
M: Marek Behun <marek.behun@nic.cz>
ARM/CZ.NIC TURRIS SUPPORT
M: Marek Behun <kabel@kernel.org>
S: Maintained
W: http://mox.turris.cz
W: https://www.turris.cz/
F: Documentation/ABI/testing/debugfs-moxtet
F: Documentation/ABI/testing/sysfs-bus-moxtet-devices
F: Documentation/ABI/testing/sysfs-firmware-turris-mox-rwtm
F: Documentation/devicetree/bindings/bus/moxtet.txt
F: Documentation/devicetree/bindings/firmware/cznic,turris-mox-rwtm.txt
F: Documentation/devicetree/bindings/gpio/gpio-moxtet.txt
F: Documentation/devicetree/bindings/leds/cznic,turris-omnia-leds.yaml
F: Documentation/devicetree/bindings/watchdog/armada-37xx-wdt.txt
F: drivers/bus/moxtet.c
F: drivers/firmware/turris-mox-rwtm.c
F: drivers/leds/leds-turris-omnia.c
F: drivers/mailbox/armada-37xx-rwtm-mailbox.c
F: drivers/gpio/gpio-moxtet.c
F: drivers/watchdog/armada_37xx_wdt.c
F: include/dt-bindings/bus/moxtet.h
F: include/linux/armada-37xx-rwtm-mailbox.h
F: include/linux/moxtet.h

ARM/EZX SMARTPHONES (A780, A910, A1200, E680, ROKR E2 and ROKR E6)

@@ -7093,7 +7100,7 @@ S: Maintained
F: drivers/i2c/busses/i2c-cpm.c

FREESCALE IMX / MXC FEC DRIVER
M: Fugang Duan <fugang.duan@nxp.com>
M: Joakim Zhang <qiangqing.zhang@nxp.com>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/net/fsl-fec.txt

@@ -8521,9 +8528,9 @@ F: drivers/pci/hotplug/rpaphp*

IBM Power SRIOV Virtual NIC Device Driver
M: Dany Madden <drt@linux.ibm.com>
M: Lijun Pan <ljp@linux.ibm.com>
M: Sukadev Bhattiprolu <sukadev@linux.ibm.com>
R: Thomas Falcon <tlfalcon@linux.ibm.com>
R: Lijun Pan <lijunp213@gmail.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/ibm/ibmvnic.*
 Makefile | 2

@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 12
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Frozen Wasteland

# *DOCUMENTATION*
@@ -1406,10 +1406,13 @@ config ARM64_PAN
config AS_HAS_LDAPR
def_bool $(as-instr,.arch_extension rcpc)

config AS_HAS_LSE_ATOMICS
def_bool $(as-instr,.arch_extension lse)

config ARM64_LSE_ATOMICS
bool
default ARM64_USE_LSE_ATOMICS
depends on $(as-instr,.arch_extension lse)
depends on AS_HAS_LSE_ATOMICS

config ARM64_USE_LSE_ATOMICS
bool "Atomic instructions"

@@ -1666,6 +1669,7 @@ config ARM64_MTE
default y
depends on ARM64_AS_HAS_MTE && ARM64_TAGGED_ADDR_ABI
depends on AS_HAS_ARMV8_5
depends on AS_HAS_LSE_ATOMICS
# Required for tag checking in the uaccess routines
depends on ARM64_PAN
select ARCH_USES_HIGH_VMA_FLAGS
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * Device Tree file for CZ.NIC Turris Mox Board
 * 2019 by Marek Behun <marek.behun@nic.cz>
 * 2019 by Marek Behún <kabel@kernel.org>
 */

/dts-v1/;
@@ -97,9 +97,9 @@
.popsection
.subsection 1
663: \insn2
664: .previous
.org . - (664b-663b) + (662b-661b)
664: .org . - (664b-663b) + (662b-661b)
.org . - (662b-661b) + (664b-663b)
.previous
.endif
.endm

@@ -169,11 +169,11 @@
 */
.macro alternative_endif
664:
.org . - (664b-663b) + (662b-661b)
.org . - (662b-661b) + (664b-663b)
.if .Lasm_alt_mode==0
.previous
.endif
.org . - (664b-663b) + (662b-661b)
.org . - (662b-661b) + (664b-663b)
.endm

/*
@@ -53,7 +53,7 @@ static inline unsigned long find_zero(unsigned long mask)
 */
static inline unsigned long load_unaligned_zeropad(const void *addr)
{
unsigned long ret, offset;
unsigned long ret, tmp;

/* Load word from unaligned pointer addr */
asm(

@@ -61,9 +61,9 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
"2:\n"
" .pushsection .fixup,\"ax\"\n"
" .align 2\n"
"3: and %1, %2, #0x7\n"
" bic %2, %2, #0x7\n"
" ldr %0, [%2]\n"
"3: bic %1, %2, #0x7\n"
" ldr %0, [%1]\n"
" and %1, %2, #0x7\n"
" lsl %1, %1, #0x3\n"
#ifndef __AARCH64EB__
" lsr %0, %0, %1\n"

@@ -73,7 +73,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
" b 2b\n"
" .popsection\n"
_ASM_EXTABLE(1b, 3b)
: "=&r" (ret), "=&r" (offset)
: "=&r" (ret), "=&r" (tmp)
: "r" (addr), "Q" (*(unsigned long *)addr));

return ret;
@@ -148,16 +148,18 @@ alternative_cb_end
.endm

/* Check for MTE asynchronous tag check faults */
.macro check_mte_async_tcf, flgs, tmp
.macro check_mte_async_tcf, tmp, ti_flags
#ifdef CONFIG_ARM64_MTE
.arch_extension lse
alternative_if_not ARM64_MTE
b 1f
alternative_else_nop_endif
mrs_s \tmp, SYS_TFSRE0_EL1
tbz \tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
/* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
orr \flgs, \flgs, #_TIF_MTE_ASYNC_FAULT
str \flgs, [tsk, #TSK_TI_FLAGS]
mov \tmp, #_TIF_MTE_ASYNC_FAULT
add \ti_flags, tsk, #TSK_TI_FLAGS
stset \tmp, [\ti_flags]
msr_s SYS_TFSRE0_EL1, xzr
1:
#endif

@@ -244,7 +246,7 @@ alternative_else_nop_endif
disable_step_tsk x19, x20

/* Check for asynchronous tag check faults in user space */
check_mte_async_tcf x19, x22
check_mte_async_tcf x22, x23
apply_ssbd 1, x22, x23

ptrauth_keys_install_kernel tsk, x20, x22, x23
@@ -267,10 +267,12 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
if (!instruction_pointer(regs))
BUG();

if (kcb->kprobe_status == KPROBE_REENTER)
if (kcb->kprobe_status == KPROBE_REENTER) {
restore_previous_kprobe(kcb);
else
} else {
kprobes_restore_local_irqflag(kcb, regs);
reset_current_kprobe();
}

break;
case KPROBE_HIT_ACTIVE:
@@ -134,7 +134,7 @@ SYM_FUNC_START(_cpu_resume)
 */
bl cpu_do_resume

#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
mov x0, sp
bl kasan_unpoison_task_stack_below
#endif
@@ -314,7 +314,7 @@ config FORCE_MAX_ZONEORDER
int "Maximum zone order"
default "11"

config RAM_BASE
config DRAM_BASE
hex "DRAM start addr (the same with memory-section in dts)"
default 0x0
@@ -28,7 +28,7 @@
#define SSEG_SIZE 0x20000000
#define LOWMEM_LIMIT (SSEG_SIZE * 2)

#define PHYS_OFFSET_OFFSET (CONFIG_RAM_BASE & (SSEG_SIZE - 1))
#define PHYS_OFFSET_OFFSET (CONFIG_DRAM_BASE & (SSEG_SIZE - 1))

#ifndef __ASSEMBLY__
@@ -55,8 +55,6 @@ CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_SCSI_QLOGIC_1280=y
CONFIG_ATA=y
CONFIG_ATA_PIIX=y
CONFIG_SATA_VITESSE=y
CONFIG_MD=y
CONFIG_BLK_DEV_MD=m
@@ -54,8 +54,7 @@

static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
/* FIXME: should this be bspstore + nr_dirty regs? */
return regs->ar_bspstore;
return regs->r12;
}

static inline int is_syscall_success(struct pt_regs *regs)

@@ -79,11 +78,6 @@ static inline long regs_return_value(struct pt_regs *regs)
unsigned long __ip = instruction_pointer(regs); \
(__ip & ~3UL) + ((__ip & 3UL) << 2); \
})
/*
 * Why not default? Because user_stack_pointer() on ia64 gives register
 * stack backing store instead...
 */
#define current_user_stack_pointer() (current_pt_regs()->r12)

/* given a pointer to a task_struct, return the user's pt_regs */
# define task_pt_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
@@ -95,7 +95,7 @@ static int __init build_node_maps(unsigned long start, unsigned long len,
 * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
 * called yet. Note that node 0 will also count all non-existent cpus.
 */
static int __meminit early_nr_cpus_node(int node)
static int early_nr_cpus_node(int node)
{
int cpu, n = 0;

@@ -110,7 +110,7 @@ static int __meminit early_nr_cpus_node(int node)
 * compute_pernodesize - compute size of pernode data
 * @node: the node id.
 */
static unsigned long __meminit compute_pernodesize(int node)
static unsigned long compute_pernodesize(int node)
{
unsigned long pernodesize = 0, cpus;

@@ -367,7 +367,7 @@ static void __init reserve_pernode_space(void)
}
}

static void __meminit scatter_node_data(void)
static void scatter_node_data(void)
{
pg_data_t **dst;
int node;
@@ -167,7 +167,7 @@ static inline __attribute_const__ int __virt_to_node_shift(void)
((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn; \
})
#else
#define ARCH_PFN_OFFSET (m68k_memory[0].addr)
#define ARCH_PFN_OFFSET (m68k_memory[0].addr >> PAGE_SHIFT)
#include <asm-generic/memory_model.h>
#endif
@@ -238,7 +238,7 @@ void flush_dcache_page(struct page *page)
{
struct address_space *mapping;

mapping = page_mapping(page);
mapping = page_mapping_file(page);
if (mapping && !mapping_mapped(mapping))
set_bit(PG_dcache_dirty, &page->flags);
else {
@@ -191,3 +191,7 @@ $(obj)/prom_init_check: $(src)/prom_init_check.sh $(obj)/prom_init.o FORCE
targets += prom_init_check

clean-files := vmlinux.lds

# Force dependency (incbin is bad)
$(obj)/vdso32_wrapper.o : $(obj)/vdso32/vdso32.so.dbg
$(obj)/vdso64_wrapper.o : $(obj)/vdso64/vdso64.so.dbg
|
|||
CFLAGS_ptrace-view.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
|
||||
|
||||
obj-y += ptrace.o ptrace-view.o
|
||||
obj-$(CONFIG_PPC_FPU_REGS) += ptrace-fpu.o
|
||||
obj-y += ptrace-fpu.o
|
||||
obj-$(CONFIG_COMPAT) += ptrace32.o
|
||||
obj-$(CONFIG_VSX) += ptrace-vsx.o
|
||||
ifneq ($(CONFIG_VSX),y)
|
||||
obj-$(CONFIG_PPC_FPU_REGS) += ptrace-novsx.o
|
||||
obj-y += ptrace-novsx.o
|
||||
endif
|
||||
obj-$(CONFIG_ALTIVEC) += ptrace-altivec.o
|
||||
obj-$(CONFIG_SPE) += ptrace-spe.o
|
||||
|
|
|
@@ -165,22 +165,8 @@ int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data);
extern const struct user_regset_view user_ppc_native_view;

/* ptrace-fpu */
#ifdef CONFIG_PPC_FPU_REGS
int ptrace_get_fpr(struct task_struct *child, int index, unsigned long *data);
int ptrace_put_fpr(struct task_struct *child, int index, unsigned long data);
#else
static inline int
ptrace_get_fpr(struct task_struct *child, int index, unsigned long *data)
{
return -EIO;
}

static inline int
ptrace_put_fpr(struct task_struct *child, int index, unsigned long data)
{
return -EIO;
}
#endif

/* ptrace-(no)adv */
void ppc_gethwdinfo(struct ppc_debug_info *dbginfo);
@@ -8,32 +8,42 @@

int ptrace_get_fpr(struct task_struct *child, int index, unsigned long *data)
{
#ifdef CONFIG_PPC_FPU_REGS
unsigned int fpidx = index - PT_FPR0;
#endif

if (index > PT_FPSCR)
return -EIO;

#ifdef CONFIG_PPC_FPU_REGS
flush_fp_to_thread(child);
if (fpidx < (PT_FPSCR - PT_FPR0))
memcpy(data, &child->thread.TS_FPR(fpidx), sizeof(long));
else
*data = child->thread.fp_state.fpscr;
#else
*data = 0;
#endif

return 0;
}

int ptrace_put_fpr(struct task_struct *child, int index, unsigned long data)
{
#ifdef CONFIG_PPC_FPU_REGS
unsigned int fpidx = index - PT_FPR0;
#endif

if (index > PT_FPSCR)
return -EIO;

#ifdef CONFIG_PPC_FPU_REGS
flush_fp_to_thread(child);
if (fpidx < (PT_FPSCR - PT_FPR0))
memcpy(&child->thread.TS_FPR(fpidx), &data, sizeof(long));
else
child->thread.fp_state.fpscr = data;
#endif

return 0;
}
@@ -21,12 +21,16 @@
int fpr_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
#ifdef CONFIG_PPC_FPU_REGS
BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
offsetof(struct thread_fp_state, fpr[32]));

flush_fp_to_thread(target);

return membuf_write(&to, &target->thread.fp_state, 33 * sizeof(u64));
#else
return membuf_write(&to, &empty_zero_page, 33 * sizeof(u64));
#endif
}

/*

@@ -46,6 +50,7 @@ int fpr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
#ifdef CONFIG_PPC_FPU_REGS
BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
offsetof(struct thread_fp_state, fpr[32]));

@@ -53,4 +58,7 @@ int fpr_set(struct task_struct *target, const struct user_regset *regset,

return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fp_state, 0, -1);
#else
return 0;
#endif
}
|
|||
.size = sizeof(long), .align = sizeof(long),
|
||||
.regset_get = gpr_get, .set = gpr_set
|
||||
},
|
||||
#ifdef CONFIG_PPC_FPU_REGS
|
||||
[REGSET_FPR] = {
|
||||
.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
|
||||
.size = sizeof(double), .align = sizeof(double),
|
||||
.regset_get = fpr_get, .set = fpr_set
|
||||
},
|
||||
#endif
|
||||
#ifdef CONFIG_ALTIVEC
|
||||
[REGSET_VMX] = {
|
||||
.core_note_type = NT_PPC_VMX, .n = 34,
|
||||
|
|
|
@@ -775,7 +775,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
else
prepare_save_user_regs(1);

if (!user_write_access_begin(frame, sizeof(*frame)))
if (!user_access_begin(frame, sizeof(*frame)))
goto badframe;

/* Put the siginfo & fill in most of the ucontext */

@@ -809,17 +809,15 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
unsafe_put_user(PPC_INST_ADDI + __NR_rt_sigreturn, &mctx->mc_pad[0],
failed);
unsafe_put_user(PPC_INST_SC, &mctx->mc_pad[1], failed);
asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
}
unsafe_put_sigset_t(&frame->uc.uc_sigmask, oldset, failed);

user_write_access_end();
user_access_end();

if (copy_siginfo_to_user(&frame->info, &ksig->info))
goto badframe;

if (tramp == (unsigned long)mctx->mc_pad)
flush_icache_range(tramp, tramp + 2 * sizeof(unsigned long));

regs->link = tramp;

#ifdef CONFIG_PPC_FPU_REGS

@@ -844,7 +842,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
return 0;

failed:
user_write_access_end();
user_access_end();

badframe:
signal_fault(tsk, regs, "handle_rt_signal32", frame);

@@ -879,7 +877,7 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
else
prepare_save_user_regs(1);

if (!user_write_access_begin(frame, sizeof(*frame)))
if (!user_access_begin(frame, sizeof(*frame)))
goto badframe;
sc = (struct sigcontext __user *) &frame->sctx;

@@ -908,11 +906,9 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
/* Set up the sigreturn trampoline: li r0,sigret; sc */
unsafe_put_user(PPC_INST_ADDI + __NR_sigreturn, &mctx->mc_pad[0], failed);
unsafe_put_user(PPC_INST_SC, &mctx->mc_pad[1], failed);
asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
}
user_write_access_end();

if (tramp == (unsigned long)mctx->mc_pad)
flush_icache_range(tramp, tramp + 2 * sizeof(unsigned long));
user_access_end();

regs->link = tramp;

@@ -935,7 +931,7 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
return 0;

failed:
user_write_access_end();
user_access_end();

badframe:
signal_fault(tsk, regs, "handle_signal32", frame);
@@ -153,7 +153,7 @@ config ARCH_FLATMEM_ENABLE
config ARCH_SPARSEMEM_ENABLE
def_bool y
depends on MMU
select SPARSEMEM_STATIC if 32BIT && SPARSMEM
select SPARSEMEM_STATIC if 32BIT && SPARSEMEM
select SPARSEMEM_VMEMMAP_ENABLE if 64BIT

config ARCH_SELECT_MEMORY_MODEL
@@ -130,6 +130,9 @@ skip_context_tracking:
 */
andi t0, s1, SR_PIE
beqz t0, 1f
/* kprobes, entered via ebreak, must have interrupts disabled. */
li t0, EXC_BREAKPOINT
beq s4, t0, 1f
#ifdef CONFIG_TRACE_IRQFLAGS
call trace_hardirqs_on
#endif
@@ -9,10 +9,16 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct kprobe *p;
struct pt_regs *regs;
struct kprobe_ctlblk *kcb;
int bit;

bit = ftrace_test_recursion_trylock(ip, parent_ip);
if (bit < 0)
return;

preempt_disable_notrace();
p = get_kprobe((kprobe_opcode_t *)ip);
if (unlikely(!p) || kprobe_disabled(p))
return;
goto out;

regs = ftrace_get_regs(fregs);
kcb = get_kprobe_ctlblk();

@@ -45,6 +51,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
 */
__this_cpu_write(current_kprobe, NULL);
}
out:
preempt_enable_notrace();
ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
@@ -178,6 +178,7 @@ asmlinkage __visible void do_trap_break(struct pt_regs *regs)
else
die(regs, "Kernel BUG");
}
NOKPROBE_SYMBOL(do_trap_break);

#ifdef CONFIG_GENERIC_BUG
int is_valid_bugaddr(unsigned long pc)
@@ -328,3 +328,4 @@ good_area:
}
return;
}
NOKPROBE_SYMBOL(do_page_fault);
@@ -401,15 +401,13 @@ ENTRY(\name)
brasl %r14,.Lcleanup_sie_int
#endif
0: CHECK_STACK __LC_SAVE_AREA_ASYNC
lgr %r11,%r15
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
stg %r11,__SF_BACKCHAIN(%r15)
j 2f
1: BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
lctlg %c1,%c1,__LC_KERNEL_ASCE
lg %r15,__LC_KERNEL_STACK
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
2: la %r11,STACK_FRAME_OVERHEAD(%r15)
2: xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
# clear user controlled registers to prevent speculative use
xgr %r0,%r0

@@ -445,6 +443,7 @@ INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq
 * Load idle PSW.
 */
ENTRY(psw_idle)
stg %r14,(__SF_GPRS+8*8)(%r15)
stg %r3,__SF_EMPTY(%r15)
larl %r1,psw_idle_exit
stg %r1,__SF_EMPTY+8(%r15)
@@ -56,8 +56,13 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
else
set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));

/* Flush this CPU's TLB. */
/*
 * Flush this CPU's TLB, assuming whoever did the allocation/free is
 * likely to continue running on this CPU.
 */
preempt_disable();
flush_tlb_one_kernel(addr);
preempt_enable();
return true;
}
@@ -115,7 +115,7 @@ SYM_FUNC_START(do_suspend_lowlevel)
movq pt_regs_r14(%rax), %r14
movq pt_regs_r15(%rax), %r15

#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
/*
 * The suspend path may have poisoned some areas deeper in the stack,
 * which we now need to unpoison.
@@ -1045,9 +1045,6 @@ void __init setup_arch(char **cmdline_p)

cleanup_highmap();

/* Look for ACPI tables and reserve memory occupied by them. */
acpi_boot_table_init();

memblock_set_current_limit(ISA_END_ADDRESS);
e820__memblock_setup();

@@ -1132,6 +1129,8 @@ void __init setup_arch(char **cmdline_p)
reserve_initrd();

acpi_table_upgrade();
/* Look for ACPI tables and reserve memory occupied by them. */
acpi_boot_table_init();

vsmp_init();
@@ -556,7 +556,7 @@ DEFINE_IDTENTRY_ERRORCODE(exc_general_protection)
tsk->thread.trap_nr = X86_TRAP_GP;

if (fixup_vdso_exception(regs, X86_TRAP_GP, error_code, 0))
return;
goto exit;

show_signal(tsk, SIGSEGV, "", desc, regs, error_code);
force_sig(SIGSEGV);

@@ -1057,7 +1057,7 @@ static void math_error(struct pt_regs *regs, int trapnr)
goto exit;

if (fixup_vdso_exception(regs, trapnr, 0, 0))
return;
goto exit;

force_sig_fault(SIGFPE, si_code,
(void __user *)uprobe_get_trap_addr(regs));
@@ -6027,19 +6027,19 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
exit_reason.basic != EXIT_REASON_PML_FULL &&
exit_reason.basic != EXIT_REASON_APIC_ACCESS &&
exit_reason.basic != EXIT_REASON_TASK_SWITCH)) {
int ndata = 3;

vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
vcpu->run->internal.ndata = 3;
vcpu->run->internal.data[0] = vectoring_info;
vcpu->run->internal.data[1] = exit_reason.full;
vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG) {
vcpu->run->internal.ndata++;
vcpu->run->internal.data[3] =
vcpu->run->internal.data[ndata++] =
vmcs_read64(GUEST_PHYSICAL_ADDRESS);
}
vcpu->run->internal.data[vcpu->run->internal.ndata++] =
vcpu->arch.last_vmentry_cpu;
vcpu->run->internal.data[ndata++] = vcpu->arch.last_vmentry_cpu;
vcpu->run->internal.ndata = ndata;
return 0;
}
@@ -292,14 +292,16 @@ int driver_deferred_probe_check_state(struct device *dev)

static void deferred_probe_timeout_work_func(struct work_struct *work)
{
struct device_private *private, *p;
struct device_private *p;

driver_deferred_probe_timeout = 0;
driver_deferred_probe_trigger();
flush_work(&deferred_probe_work);

list_for_each_entry_safe(private, p, &deferred_probe_pending_list, deferred_probe)
dev_info(private->device, "deferred probe pending\n");
mutex_lock(&deferred_probe_mutex);
list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe)
dev_info(p->device, "deferred probe pending\n");
mutex_unlock(&deferred_probe_mutex);
wake_up_all(&probe_timeout_waitqueue);
}
static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
@@ -2,7 +2,7 @@
/*
 * Turris Mox module configuration bus driver
 *
 * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz>
 * Copyright (C) 2019 Marek Behún <kabel@kernel.org>
 */

#include <dt-bindings/bus/moxtet.h>

@@ -879,6 +879,6 @@ static void __exit moxtet_exit(void)
}
module_exit(moxtet_exit);

MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
MODULE_DESCRIPTION("CZ.NIC's Turris Mox module configuration bus");
MODULE_LICENSE("GPL v2");
@@ -66,7 +66,14 @@ EXPORT_SYMBOL_GPL(clk_fixed_factor_ops);

static void devm_clk_hw_register_fixed_factor_release(struct device *dev, void *res)
{
clk_hw_unregister_fixed_factor(&((struct clk_fixed_factor *)res)->hw);
struct clk_fixed_factor *fix = res;

/*
 * We can not use clk_hw_unregister_fixed_factor, since it will kfree()
 * the hw, resulting in double free. Just unregister the hw and let
 * devres code kfree() it.
 */
clk_hw_unregister(&fix->hw);
}

static struct clk_hw *
@@ -4357,20 +4357,19 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
/* search the list of notifiers for this clk */
list_for_each_entry(cn, &clk_notifier_list, node)
if (cn->clk == clk)
break;
goto found;

/* if clk wasn't in the notifier list, allocate new clk_notifier */
if (cn->clk != clk) {
cn = kzalloc(sizeof(*cn), GFP_KERNEL);
if (!cn)
goto out;
cn = kzalloc(sizeof(*cn), GFP_KERNEL);
if (!cn)
goto out;

cn->clk = clk;
srcu_init_notifier_head(&cn->notifier_head);
cn->clk = clk;
srcu_init_notifier_head(&cn->notifier_head);

list_add(&cn->node, &clk_notifier_list);
}
list_add(&cn->node, &clk_notifier_list);

found:
ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

clk->core->notifier_count++;

@@ -4395,32 +4394,28 @@ EXPORT_SYMBOL_GPL(clk_notifier_register);
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
struct clk_notifier *cn = NULL;
int ret = -EINVAL;
struct clk_notifier *cn;
int ret = -ENOENT;

if (!clk || !nb)
return -EINVAL;

clk_prepare_lock();

list_for_each_entry(cn, &clk_notifier_list, node)
if (cn->clk == clk)
list_for_each_entry(cn, &clk_notifier_list, node) {
if (cn->clk == clk) {
ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

clk->core->notifier_count--;

/* XXX the notifier code should handle this better */
if (!cn->notifier_head.head) {
srcu_cleanup_notifier_head(&cn->notifier_head);
list_del(&cn->node);
kfree(cn);
}
break;

if (cn->clk == clk) {
ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

clk->core->notifier_count--;

/* XXX the notifier code should handle this better */
if (!cn->notifier_head.head) {
srcu_cleanup_notifier_head(&cn->notifier_head);
list_del(&cn->node);
kfree(cn);
}

} else {
ret = -ENOENT;
}

clk_prepare_unlock();
@@ -304,7 +304,7 @@ static struct clk_rcg2 cam_cc_bps_clk_src = {
.name = "cam_cc_bps_clk_src",
.parent_data = cam_cc_parent_data_2,
.num_parents = 5,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};

@@ -325,7 +325,7 @@ static struct clk_rcg2 cam_cc_cci_0_clk_src = {
.name = "cam_cc_cci_0_clk_src",
.parent_data = cam_cc_parent_data_5,
.num_parents = 3,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};

@@ -339,7 +339,7 @@ static struct clk_rcg2 cam_cc_cci_1_clk_src = {
.name = "cam_cc_cci_1_clk_src",
.parent_data = cam_cc_parent_data_5,
.num_parents = 3,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};

@@ -360,7 +360,7 @@ static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
.name = "cam_cc_cphy_rx_clk_src",
.parent_data = cam_cc_parent_data_3,
.num_parents = 6,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};

@@ -379,7 +379,7 @@ static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
.name = "cam_cc_csi0phytimer_clk_src",
.parent_data = cam_cc_parent_data_0,
.num_parents = 4,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};

@@ -393,7 +393,7 @@ static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
.name = "cam_cc_csi1phytimer_clk_src",
.parent_data = cam_cc_parent_data_0,
.num_parents = 4,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};

@@ -407,7 +407,7 @@ static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
.name = "cam_cc_csi2phytimer_clk_src",
.parent_data = cam_cc_parent_data_0,
.num_parents = 4,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};

@@ -421,7 +421,7 @@ static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
.name = "cam_cc_csi3phytimer_clk_src",
.parent_data = cam_cc_parent_data_0,
.num_parents = 4,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};

@@ -443,7 +443,7 @@ static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
.name = "cam_cc_fast_ahb_clk_src",
.parent_data = cam_cc_parent_data_0,
.num_parents = 4,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};

@@ -466,7 +466,7 @@ static struct clk_rcg2 cam_cc_icp_clk_src = {
.name = "cam_cc_icp_clk_src",
.parent_data = cam_cc_parent_data_2,
.num_parents = 5,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};

@@ -488,7 +488,7 @@ static struct clk_rcg2 cam_cc_ife_0_clk_src = {
.name = "cam_cc_ife_0_clk_src",
.parent_data = cam_cc_parent_data_4,
.num_parents = 4,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};

@@ -510,7 +510,7 @@ static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
.name = "cam_cc_ife_0_csid_clk_src",
.parent_data = cam_cc_parent_data_3,
.num_parents = 6,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};

@@ -524,7 +524,7 @@ static struct clk_rcg2 cam_cc_ife_1_clk_src = {
.name = "cam_cc_ife_1_clk_src",
.parent_data = cam_cc_parent_data_4,
.num_parents = 4,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};

@@ -538,7 +538,7 @@ static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
.name = "cam_cc_ife_1_csid_clk_src",
.parent_data = cam_cc_parent_data_3,
.num_parents = 6,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};

@@ -553,7 +553,7 @@ static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
.parent_data = cam_cc_parent_data_4,
.num_parents = 4,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};

@@ -567,7 +567,7 @@ static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
.name = "cam_cc_ife_lite_csid_clk_src",
.parent_data = cam_cc_parent_data_3,
.num_parents = 6,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};

@@ -590,7 +590,7 @@ static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
.name = "cam_cc_ipe_0_clk_src",
.parent_data = cam_cc_parent_data_2,
.num_parents = 5,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};

@@ -613,7 +613,7 @@ static struct clk_rcg2 cam_cc_jpeg_clk_src = {
.name = "cam_cc_jpeg_clk_src",
.parent_data = cam_cc_parent_data_2,
.num_parents = 5,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};

@@ -635,7 +635,7 @@ static struct clk_rcg2 cam_cc_lrme_clk_src = {
.name = "cam_cc_lrme_clk_src",
.parent_data = cam_cc_parent_data_6,
.num_parents = 5,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};

@@ -656,7 +656,7 @@ static struct clk_rcg2 cam_cc_mclk0_clk_src = {
.name = "cam_cc_mclk0_clk_src",
.parent_data = cam_cc_parent_data_1,
.num_parents = 3,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};

@@ -670,7 +670,7 @@ static struct clk_rcg2 cam_cc_mclk1_clk_src = {
.name = "cam_cc_mclk1_clk_src",
.parent_data = cam_cc_parent_data_1,
.num_parents = 3,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};

@@ -684,7 +684,7 @@ static struct clk_rcg2 cam_cc_mclk2_clk_src = {
.name = "cam_cc_mclk2_clk_src",
.parent_data = cam_cc_parent_data_1,
.num_parents = 3,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};

@@ -698,7 +698,7 @@ static struct clk_rcg2 cam_cc_mclk3_clk_src = {
.name = "cam_cc_mclk3_clk_src",
.parent_data = cam_cc_parent_data_1,
.num_parents = 3,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};

@@ -712,7 +712,7 @@ static struct clk_rcg2 cam_cc_mclk4_clk_src = {
.name = "cam_cc_mclk4_clk_src",
.parent_data = cam_cc_parent_data_1,
.num_parents = 3,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};

@@ -732,7 +732,7 @@ static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = 4,
.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@@ -99,7 +99,7 @@ static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk,
val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift;
val &= GENMASK(socfpgaclk->width - 1, 0);
/* Check for GPIO_DB_CLK by its offset */
if ((int) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET)
if ((uintptr_t) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET)
div = val + 1;
else
div = (1 << val);
@@ -4,6 +4,7 @@
#include <linux/security.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/mutex.h>
#include <linux/cdev.h>
#include <linux/idr.h>

@@ -96,21 +97,18 @@ struct mbox_cmd {
 * @dev: driver core device object
 * @cdev: char dev core object for ioctl operations
 * @cxlm: pointer to the parent device driver data
 * @ops_active: active user of @cxlm in ops handlers
 * @ops_dead: completion when all @cxlm ops users have exited
 * @id: id number of this memdev instance.
 */
struct cxl_memdev {
struct device dev;
struct cdev cdev;
struct cxl_mem *cxlm;
struct percpu_ref ops_active;
struct completion ops_dead;
int id;
};

static int cxl_mem_major;
static DEFINE_IDA(cxl_memdev_ida);
static DECLARE_RWSEM(cxl_memdev_rwsem);
static struct dentry *cxl_debugfs;
static bool cxl_raw_allow_all;

@@ -169,7 +167,7 @@ struct cxl_mem_command {
 * table will be validated against the user's input. For example, if size_in is
 * 0, and the user passed in 1, it is an error.
 */
static struct cxl_mem_command mem_commands[] = {
static struct cxl_mem_command mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
#ifdef CONFIG_CXL_MEM_RAW_COMMANDS
CXL_CMD(RAW, ~0, ~0, 0),

@@ -776,26 +774,43 @@ static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd,
static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct cxl_memdev *cxlmd;
struct inode *inode;
int rc = -ENOTTY;
struct cxl_memdev *cxlmd = file->private_data;
int rc = -ENXIO;

inode = file_inode(file);
cxlmd = container_of(inode->i_cdev, typeof(*cxlmd), cdev);

if (!percpu_ref_tryget_live(&cxlmd->ops_active))
return -ENXIO;

rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);

percpu_ref_put(&cxlmd->ops_active);
down_read(&cxl_memdev_rwsem);
if (cxlmd->cxlm)
rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
up_read(&cxl_memdev_rwsem);

return rc;
}

static int cxl_memdev_open(struct inode *inode, struct file *file)
{
struct cxl_memdev *cxlmd =
container_of(inode->i_cdev, typeof(*cxlmd), cdev);

get_device(&cxlmd->dev);
file->private_data = cxlmd;

return 0;
}

static int cxl_memdev_release_file(struct inode *inode, struct file *file)
{
struct cxl_memdev *cxlmd =
container_of(inode->i_cdev, typeof(*cxlmd), cdev);

put_device(&cxlmd->dev);

return 0;
}

static const struct file_operations cxl_memdev_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = cxl_memdev_ioctl,
.open = cxl_memdev_open,
.release = cxl_memdev_release_file,
.compat_ioctl = compat_ptr_ioctl,
.llseek = noop_llseek,
};

@@ -984,7 +999,7 @@ static struct cxl_mem *cxl_mem_create(struct pci_dev *pdev, u32 reg_lo,
return NULL;
}

offset = ((u64)reg_hi << 32) | FIELD_GET(CXL_REGLOC_ADDR_MASK, reg_lo);
offset = ((u64)reg_hi << 32) | (reg_lo & CXL_REGLOC_ADDR_MASK);
bar = FIELD_GET(CXL_REGLOC_BIR_MASK, reg_lo);

/* Basic sanity check that BAR is big enough */

@@ -1049,7 +1064,6 @@ static void cxl_memdev_release(struct device *dev)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);

percpu_ref_exit(&cxlmd->ops_active);
ida_free(&cxl_memdev_ida, cxlmd->id);
kfree(cxlmd);
}

@@ -1066,7 +1080,7 @@ static ssize_t firmware_version_show(struct device *dev,
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_mem *cxlm = cxlmd->cxlm;

return sprintf(buf, "%.16s\n", cxlm->firmware_version);
return sysfs_emit(buf, "%.16s\n", cxlm->firmware_version);
}
static DEVICE_ATTR_RO(firmware_version);

@@ -1076,7 +1090,7 @@ static ssize_t payload_max_show(struct device *dev,
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_mem *cxlm = cxlmd->cxlm;

return sprintf(buf, "%zu\n", cxlm->payload_size);
return sysfs_emit(buf, "%zu\n", cxlm->payload_size);
}
static DEVICE_ATTR_RO(payload_max);

@@ -1087,7 +1101,7 @@ static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
struct cxl_mem *cxlm = cxlmd->cxlm;
unsigned long long len = range_len(&cxlm->ram_range);

return sprintf(buf, "%#llx\n", len);
return sysfs_emit(buf, "%#llx\n", len);
}

static struct device_attribute dev_attr_ram_size =

@@ -1100,7 +1114,7 @@ static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
struct cxl_mem *cxlm = cxlmd->cxlm;
unsigned long long len = range_len(&cxlm->pmem_range);

return sprintf(buf, "%#llx\n", len);
return sysfs_emit(buf, "%#llx\n", len);
}

static struct device_attribute dev_attr_pmem_size =

@@ -1150,27 +1164,24 @@ static const struct device_type cxl_memdev_type = {
.groups = cxl_memdev_attribute_groups,
};

static void cxlmdev_unregister(void *_cxlmd)
static void cxl_memdev_shutdown(struct cxl_memdev *cxlmd)
{
down_write(&cxl_memdev_rwsem);
cxlmd->cxlm = NULL;
up_write(&cxl_memdev_rwsem);
}

static void cxl_memdev_unregister(void *_cxlmd)
{
struct cxl_memdev *cxlmd = _cxlmd;
struct device *dev = &cxlmd->dev;

percpu_ref_kill(&cxlmd->ops_active);
cdev_device_del(&cxlmd->cdev, dev);
wait_for_completion(&cxlmd->ops_dead);
cxlmd->cxlm = NULL;
cxl_memdev_shutdown(cxlmd);
put_device(dev);
}

static void cxlmdev_ops_active_release(struct percpu_ref *ref)
{
struct cxl_memdev *cxlmd =
container_of(ref, typeof(*cxlmd), ops_active);

complete(&cxlmd->ops_dead);
}

static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm)
{
struct pci_dev *pdev = cxlm->pdev;
struct cxl_memdev *cxlmd;

@@ -1180,22 +1191,11 @@ static int cxl_mem_add_memdev(struct cxl_mem *cxlm)

cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
if (!cxlmd)
return -ENOMEM;
init_completion(&cxlmd->ops_dead);

/*
 * @cxlm is deallocated when the driver unbinds so operations
 * that are using it need to hold a live reference.
 */
cxlmd->cxlm = cxlm;
rc = percpu_ref_init(&cxlmd->ops_active, cxlmdev_ops_active_release, 0,
GFP_KERNEL);
if (rc)
goto err_ref;
return ERR_PTR(-ENOMEM);

rc = ida_alloc_range(&cxl_memdev_ida, 0, CXL_MEM_MAX_DEVS, GFP_KERNEL);
if (rc < 0)
goto err_id;
goto err;
cxlmd->id = rc;

dev = &cxlmd->dev;

@@ -1204,30 +1204,54 @@ static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
dev->bus = &cxl_bus_type;
dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
dev->type = &cxl_memdev_type;
dev_set_name(dev, "mem%d", cxlmd->id);
device_set_pm_not_required(dev);

cdev = &cxlmd->cdev;
cdev_init(cdev, &cxl_memdev_fops);
return cxlmd;

err:
kfree(cxlmd);
return ERR_PTR(rc);
}

static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
{
struct cxl_memdev *cxlmd;
struct device *dev;
struct cdev *cdev;
int rc;

cxlmd = cxl_memdev_alloc(cxlm);
if (IS_ERR(cxlmd))
return PTR_ERR(cxlmd);

dev = &cxlmd->dev;
rc = dev_set_name(dev, "mem%d", cxlmd->id);
if (rc)
goto err;

/*
 * Activate ioctl operations, no cxl_memdev_rwsem manipulation
 * needed as this is ordered with cdev_add() publishing the device.
 */
cxlmd->cxlm = cxlm;

cdev = &cxlmd->cdev;
rc = cdev_device_add(cdev, dev);
if (rc)
goto err_add;
goto err;

return devm_add_action_or_reset(dev->parent, cxlmdev_unregister, cxlmd);
return devm_add_action_or_reset(dev->parent, cxl_memdev_unregister,
cxlmd);

err_add:
ida_free(&cxl_memdev_ida, cxlmd->id);
err_id:
err:
/*
 * Theoretically userspace could have already entered the fops,
 * so flush ops_active.
 * The cdev was briefly live, shutdown any ioctl operations that
 * saw that state.
 */
percpu_ref_kill(&cxlmd->ops_active);
wait_for_completion(&cxlmd->ops_dead);
percpu_ref_exit(&cxlmd->ops_active);
err_ref:
kfree(cxlmd);

cxl_memdev_shutdown(cxlmd);
put_device(dev);
return rc;
}

@@ -1396,6 +1420,7 @@ out:
 */
static int cxl_mem_identify(struct cxl_mem *cxlm)
{
/* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
struct cxl_mbox_identify {
char fw_revision[0x10];
__le64 total_capacity;

@@ -1424,10 +1449,11 @@ static int cxl_mem_identify(struct cxl_mem *cxlm)
 * For now, only the capacity is exported in sysfs
 */
cxlm->ram_range.start = 0;
cxlm->ram_range.end = le64_to_cpu(id.volatile_capacity) - 1;
cxlm->ram_range.end = le64_to_cpu(id.volatile_capacity) * SZ_256M - 1;

cxlm->pmem_range.start = 0;
cxlm->pmem_range.end = le64_to_cpu(id.persistent_capacity) - 1;
cxlm->pmem_range.end =
le64_to_cpu(id.persistent_capacity) * SZ_256M - 1;

memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision));
@@ -90,13 +90,11 @@ static ssize_t do_id_store(struct device_driver *drv, const char *buf,
list_add(&dax_id->list, &dax_drv->ids);
} else
rc = -ENOMEM;
} else
/* nothing to remove */;
}
} else if (action == ID_REMOVE) {
list_del(&dax_id->list);
kfree(dax_id);
} else
/* dax_id already added */;
}
mutex_unlock(&dax_bus_lock);

if (rc < 0)
@@ -1086,6 +1086,7 @@ static int __dma_async_device_channel_register(struct dma_device *device,
kfree(chan->dev);
err_free_local:
free_percpu(chan->local);
chan->local = NULL;
return rc;
}
@@ -10,6 +10,7 @@ config DW_DMAC_CORE

config DW_DMAC
tristate "Synopsys DesignWare AHB DMA platform driver"
depends on HAS_IOMEM
select DW_DMAC_CORE
help
Support the Synopsys DesignWare AHB DMA controller. This

@@ -18,6 +19,7 @@ config DW_DMAC
config DW_DMAC_PCI
tristate "Synopsys DesignWare AHB DMA PCI driver"
depends on PCI
depends on HAS_IOMEM
select DW_DMAC_CORE
help
Support the Synopsys DesignWare AHB DMA controller on the
@@ -282,6 +282,22 @@ void idxd_wq_drain(struct idxd_wq *wq)
idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
}

void idxd_wq_reset(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev;
u32 operand;

if (wq->state != IDXD_WQ_ENABLED) {
dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
return;
}

operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
wq->state = IDXD_WQ_DISABLED;
}

int idxd_wq_map_portal(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;

@@ -363,8 +379,6 @@ int idxd_wq_disable_pasid(struct idxd_wq *wq)
void idxd_wq_disable_cleanup(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev;
int i, wq_offset;

lockdep_assert_held(&idxd->dev_lock);
memset(wq->wqcfg, 0, idxd->wqcfg_size);

@@ -376,14 +390,6 @@ void idxd_wq_disable_cleanup(struct idxd_wq *wq)
wq->ats_dis = 0;
clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
memset(wq->name, 0, WQ_NAME_SIZE);

for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
iowrite32(0, idxd->reg_base + wq_offset);
dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
wq->id, i, wq_offset,
ioread32(idxd->reg_base + wq_offset));
}
}

/* Device control bits */

@@ -574,6 +580,36 @@ void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
}

/* Device configuration bits */
void idxd_msix_perm_setup(struct idxd_device *idxd)
{
union msix_perm mperm;
int i, msixcnt;

msixcnt = pci_msix_vec_count(idxd->pdev);
if (msixcnt < 0)
return;

mperm.bits = 0;
mperm.pasid = idxd->pasid;
mperm.pasid_en = device_pasid_enabled(idxd);
for (i = 1; i < msixcnt; i++)
iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
}

void idxd_msix_perm_clear(struct idxd_device *idxd)
{
union msix_perm mperm;
int i, msixcnt;

msixcnt = pci_msix_vec_count(idxd->pdev);
if (msixcnt < 0)
return;

mperm.bits = 0;
for (i = 1; i < msixcnt; i++)
iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
}

static void idxd_group_config_write(struct idxd_group *group)
{
struct idxd_device *idxd = group->idxd;

@@ -642,7 +678,14 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
if (!wq->group)
return 0;

memset(wq->wqcfg, 0, idxd->wqcfg_size);
/*
 * Instead of memset the entire shadow copy of WQCFG, copy from the hardware after
 * wq reset. This will copy back the sticky values that are present on some devices.
 */
for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset);
}

/* byte 0-3 */
wq->wqcfg->wq_size = wq->size;
@@ -316,6 +316,8 @@ void idxd_unregister_driver(void);
struct bus_type *idxd_get_bus_type(struct idxd_device *idxd);

/* device interrupt control */
void idxd_msix_perm_setup(struct idxd_device *idxd);
void idxd_msix_perm_clear(struct idxd_device *idxd);
irqreturn_t idxd_irq_handler(int vec, void *data);
irqreturn_t idxd_misc_thread(int vec, void *data);
irqreturn_t idxd_wq_thread(int irq, void *data);

@@ -341,6 +343,7 @@ void idxd_wq_free_resources(struct idxd_wq *wq);
int idxd_wq_enable(struct idxd_wq *wq);
int idxd_wq_disable(struct idxd_wq *wq);
void idxd_wq_drain(struct idxd_wq *wq);
void idxd_wq_reset(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
void idxd_wq_disable_cleanup(struct idxd_wq *wq);

@@ -65,7 +65,6 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
struct idxd_irq_entry *irq_entry;
int i, msixcnt;
int rc = 0;
union msix_perm mperm;

msixcnt = pci_msix_vec_count(pdev);
if (msixcnt < 0) {

@@ -144,14 +143,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
}

idxd_unmask_error_interrupts(idxd);

/* Setup MSIX permission table */
mperm.bits = 0;
mperm.pasid = idxd->pasid;
mperm.pasid_en = device_pasid_enabled(idxd);
for (i = 1; i < msixcnt; i++)
iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);

idxd_msix_perm_setup(idxd);
return 0;

err_no_irq:

@@ -510,6 +502,7 @@ static void idxd_shutdown(struct pci_dev *pdev)
idxd_flush_work_list(irq_entry);
}

idxd_msix_perm_clear(idxd);
destroy_workqueue(idxd->wq);
}
@@ -124,7 +124,9 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
for (i = 0; i < 4; i++)
idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
IDXD_SWERR_OFFSET + i * sizeof(u64));
iowrite64(IDXD_SWERR_ACK, idxd->reg_base + IDXD_SWERR_OFFSET);

iowrite64(idxd->sw_err.bits[0] & IDXD_SWERR_ACK,
idxd->reg_base + IDXD_SWERR_OFFSET);

if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
int id = idxd->sw_err.wq_idx;

@@ -275,7 +275,6 @@ static void disable_wq(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev;
int rc;

mutex_lock(&wq->wq_lock);
dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));

@@ -296,17 +295,13 @@ static void disable_wq(struct idxd_wq *wq)
idxd_wq_unmap_portal(wq);

idxd_wq_drain(wq);
rc = idxd_wq_disable(wq);
idxd_wq_reset(wq);

idxd_wq_free_resources(wq);
wq->client_count = 0;
mutex_unlock(&wq->wq_lock);

if (rc < 0)
dev_warn(dev, "Failed to disable %s: %d\n",
dev_name(&wq->conf_dev), rc);
else
dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
}

static int idxd_config_bus_remove(struct device *dev)

@@ -989,7 +984,7 @@ static ssize_t wq_size_store(struct device *dev,
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
return -EPERM;

if (wq->state != IDXD_WQ_DISABLED)
if (idxd->state == IDXD_DEV_ENABLED)
return -EPERM;

if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)

@@ -1449,8 +1444,14 @@ static ssize_t op_cap_show(struct device *dev,
{
struct idxd_device *idxd =
container_of(dev, struct idxd_device, conf_dev);
int i, rc = 0;

return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
for (i = 0; i < 4; i++)
rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);

rc--;
rc += sysfs_emit_at(buf, rc, "\n");
return rc;
}
static DEVICE_ATTR_RO(op_cap);
@@ -507,10 +507,8 @@ static int plx_dma_create(struct pci_dev *pdev)

rc = request_irq(pci_irq_vector(pdev, 0), plx_dma_isr, 0,
KBUILD_MODNAME, plxdev);
if (rc) {
kfree(plxdev);
return rc;
}
if (rc)
goto free_plx;

spin_lock_init(&plxdev->ring_lock);
tasklet_setup(&plxdev->desc_task, plx_dma_desc_task);

@@ -540,14 +538,20 @@ static int plx_dma_create(struct pci_dev *pdev)
rc = dma_async_device_register(dma);
if (rc) {
pci_err(pdev, "Failed to register dma device: %d\n", rc);
free_irq(pci_irq_vector(pdev, 0), plxdev);
kfree(plxdev);
return rc;
goto put_device;
}

pci_set_drvdata(pdev, plxdev);

return 0;

put_device:
put_device(&pdev->dev);
free_irq(pci_irq_vector(pdev, 0), plxdev);
free_plx:
kfree(plxdev);

return rc;
}

static int plx_dma_probe(struct pci_dev *pdev,

@@ -723,7 +723,7 @@ static void tegra_dma_issue_pending(struct dma_chan *dc)
goto end;
}
if (!tdc->busy) {
err = pm_runtime_get_sync(tdc->tdma->dev);
err = pm_runtime_resume_and_get(tdc->tdma->dev);
if (err < 0) {
dev_err(tdc2dev(tdc), "Failed to enable DMA\n");
goto end;

@@ -818,7 +818,7 @@ static void tegra_dma_synchronize(struct dma_chan *dc)
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
int err;

err = pm_runtime_get_sync(tdc->tdma->dev);
err = pm_runtime_resume_and_get(tdc->tdma->dev);
if (err < 0) {
dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d\n", err);
return;
@@ -839,6 +839,7 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
struct xilinx_dpdma_tx_desc *desc;
struct virt_dma_desc *vdesc;
u32 reg, channels;
bool first_frame;

lockdep_assert_held(&chan->lock);

@@ -852,14 +853,6 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
chan->running = true;
}

if (chan->video_group)
channels = xilinx_dpdma_chan_video_group_ready(chan);
else
channels = BIT(chan->id);

if (!channels)
return;

vdesc = vchan_next_desc(&chan->vchan);
if (!vdesc)
return;

@@ -884,13 +877,26 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
FIELD_PREP(XILINX_DPDMA_CH_DESC_START_ADDRE_MASK,
upper_32_bits(sw_desc->dma_addr)));

if (chan->first_frame)
first_frame = chan->first_frame;
chan->first_frame = false;

if (chan->video_group) {
channels = xilinx_dpdma_chan_video_group_ready(chan);
/*
* Trigger the transfer only when all channels in the group are
* ready.
*/
if (!channels)
return;
} else {
channels = BIT(chan->id);
}

if (first_frame)
reg = XILINX_DPDMA_GBL_TRIG_MASK(channels);
else
reg = XILINX_DPDMA_GBL_RETRIG_MASK(channels);

chan->first_frame = false;

dpdma_write(xdev->reg, XILINX_DPDMA_GBL, reg);
}

@@ -1042,13 +1048,14 @@ static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan)
*/
static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
{
struct xilinx_dpdma_tx_desc *active = chan->desc.active;
struct xilinx_dpdma_tx_desc *active;
unsigned long flags;

spin_lock_irqsave(&chan->lock, flags);

xilinx_dpdma_debugfs_desc_done_irq(chan);

active = chan->desc.active;
if (active)
vchan_cyclic_callback(&active->vdesc);
else
@@ -2,7 +2,7 @@
/*
* Turris Mox rWTM firmware driver
*
* Copyright (C) 2019 Marek Behun <marek.behun@nic.cz>
* Copyright (C) 2019 Marek Behún <kabel@kernel.org>
*/

#include <linux/armada-37xx-rwtm-mailbox.h>

@@ -547,4 +547,4 @@ module_platform_driver(turris_mox_rwtm_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Turris Mox rWTM firmware driver");
MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");

@@ -2,7 +2,7 @@
/*
* Turris Mox Moxtet GPIO expander
*
* Copyright (C) 2018 Marek Behun <marek.behun@nic.cz>
* Copyright (C) 2018 Marek Behún <kabel@kernel.org>
*/

#include <linux/bitops.h>

@@ -174,6 +174,6 @@ static struct moxtet_driver moxtet_gpio_driver = {
};
module_moxtet_driver(moxtet_gpio_driver);

MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
MODULE_DESCRIPTION("Turris Mox Moxtet GPIO expander");
MODULE_LICENSE("GPL v2");

@@ -458,6 +458,8 @@ static ssize_t export_store(struct class *class,
long gpio;
struct gpio_desc *desc;
int status;
struct gpio_chip *gc;
int offset;

status = kstrtol(buf, 0, &gpio);
if (status < 0)

@@ -469,6 +471,12 @@ static ssize_t export_store(struct class *class,
pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
return -EINVAL;
}
gc = desc->gdev->chip;
offset = gpio_chip_hwgpio(desc);
if (!gpiochip_line_is_valid(gc, offset)) {
pr_warn("%s: GPIO %ld masked\n", __func__, gpio);
return -EINVAL;
}

/* No extra locking here; FLAG_SYSFS just signifies that the
* request and export were done by on behalf of userspace, so
@@ -646,7 +646,6 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
break;
case INTEL_BACKLIGHT_DISPLAY_DDI:
try_intel_interface = true;
try_vesa_interface = true;
break;
default:
return -ENODEV;

@@ -992,14 +992,14 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
* FIXME As we do with eDP, just make a note of the time here
* and perform the wait before the next panel power on.
*/
intel_dsi_msleep(intel_dsi, intel_dsi->panel_pwr_cycle_delay);
msleep(intel_dsi->panel_pwr_cycle_delay);
}

static void intel_dsi_shutdown(struct intel_encoder *encoder)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);

intel_dsi_msleep(intel_dsi, intel_dsi->panel_pwr_cycle_delay);
msleep(intel_dsi->panel_pwr_cycle_delay);
}

static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,

@@ -5471,12 +5471,12 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
int ret;

memset(wm, 0, sizeof(*wm));

/* Watermarks calculated in master */
if (plane_state->planar_slave)
return 0;

memset(wm, 0, sizeof(*wm));

if (plane_state->planar_linked_plane) {
const struct drm_framebuffer *fb = plane_state->hw.fb;
enum plane_id y_plane_id = plane_state->planar_linked_plane->id;
@@ -10,6 +10,7 @@
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmi.h>
#include <linux/interrupt.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/module.h>

@@ -22,9 +23,13 @@

#define ACEL_EN BIT(0)
#define GYRO_EN BIT(1)
#define MAGNO_EN BIT(2)
#define MAGNO_EN BIT(2)
#define ALS_EN BIT(19)

static int sensor_mask_override = -1;
module_param_named(sensor_mask, sensor_mask_override, int, 0444);
MODULE_PARM_DESC(sensor_mask, "override the detected sensors mask");

void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info)
{
union sfh_cmd_param cmd_param;

@@ -73,12 +78,41 @@ void amd_stop_all_sensors(struct amd_mp2_dev *privdata)
writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
}

static const struct dmi_system_id dmi_sensor_mask_overrides[] = {
{
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY x360 Convertible 13-ag0xxx"),
},
.driver_data = (void *)(ACEL_EN | MAGNO_EN),
},
{
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY x360 Convertible 15-cp0xxx"),
},
.driver_data = (void *)(ACEL_EN | MAGNO_EN),
},
{ }
};

int amd_mp2_get_sensor_num(struct amd_mp2_dev *privdata, u8 *sensor_id)
{
int activestatus, num_of_sensors = 0;
const struct dmi_system_id *dmi_id;
u32 activecontrolstatus;

if (sensor_mask_override == -1) {
dmi_id = dmi_first_match(dmi_sensor_mask_overrides);
if (dmi_id)
sensor_mask_override = (long)dmi_id->driver_data;
}

if (sensor_mask_override >= 0) {
activestatus = sensor_mask_override;
} else {
activecontrolstatus = readl(privdata->mmio + AMD_P2C_MSG3);
activestatus = activecontrolstatus >> 4;
}

privdata->activecontrolstatus = readl(privdata->mmio + AMD_P2C_MSG3);
activestatus = privdata->activecontrolstatus >> 4;
if (ACEL_EN & activestatus)
sensor_id[num_of_sensors++] = accel_idx;

@@ -61,7 +61,6 @@ struct amd_mp2_dev {
struct pci_dev *pdev;
struct amdtp_cl_data *cl_data;
void __iomem *mmio;
u32 activecontrolstatus;
};

struct amd_mp2_sensor_info {
@@ -761,6 +761,7 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)

if (input_register_device(data->input2)) {
input_free_device(input2);
ret = -ENOENT;
goto exit;
}
}

@@ -1221,6 +1221,9 @@ static const struct hid_device_id asus_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD),
QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2),
QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_T100TA_KEYBOARD),
QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },

@@ -161,6 +161,7 @@ struct cp2112_device {
atomic_t read_avail;
atomic_t xfer_avail;
struct gpio_chip gc;
struct irq_chip irq;
u8 *in_out_buffer;
struct mutex lock;

@@ -1175,16 +1176,6 @@ static int cp2112_gpio_irq_type(struct irq_data *d, unsigned int type)
return 0;
}

static struct irq_chip cp2112_gpio_irqchip = {
.name = "cp2112-gpio",
.irq_startup = cp2112_gpio_irq_startup,
.irq_shutdown = cp2112_gpio_irq_shutdown,
.irq_ack = cp2112_gpio_irq_ack,
.irq_mask = cp2112_gpio_irq_mask,
.irq_unmask = cp2112_gpio_irq_unmask,
.irq_set_type = cp2112_gpio_irq_type,
};

static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
int pin)
{

@@ -1339,8 +1330,17 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
dev->gc.can_sleep = 1;
dev->gc.parent = &hdev->dev;

dev->irq.name = "cp2112-gpio";
dev->irq.irq_startup = cp2112_gpio_irq_startup;
dev->irq.irq_shutdown = cp2112_gpio_irq_shutdown;
dev->irq.irq_ack = cp2112_gpio_irq_ack;
dev->irq.irq_mask = cp2112_gpio_irq_mask;
dev->irq.irq_unmask = cp2112_gpio_irq_unmask;
dev->irq.irq_set_type = cp2112_gpio_irq_type;
dev->irq.flags = IRQCHIP_MASK_ON_SUSPEND;

girq = &dev->gc.irq;
girq->chip = &cp2112_gpio_irqchip;
girq->chip = &dev->irq;
/* The event comes from the outside so no parent handler */
girq->parent_handler = NULL;
girq->num_parents = 0;
@@ -573,6 +573,8 @@ static void hammer_remove(struct hid_device *hdev)
}

static const struct hid_device_id hammer_devices[] = {
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_DON) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,

@@ -194,6 +194,7 @@
#define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2 0x1837
#define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3 0x1822
#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD 0x1866
#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2 0x19b6
#define USB_DEVICE_ID_ASUSTEK_FX503VD_KEYBOARD 0x1869

#define USB_VENDOR_ID_ATEN 0x0557

@@ -493,6 +494,7 @@
#define USB_DEVICE_ID_GOOGLE_MASTERBALL 0x503c
#define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d
#define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044
#define USB_DEVICE_ID_GOOGLE_DON 0x5050

#define USB_VENDOR_ID_GOTOP 0x08f2
#define USB_DEVICE_ID_SUPER_Q2 0x007f

@@ -2533,7 +2533,7 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
!wacom_wac->shared->is_touch_on) {
if (!wacom_wac->shared->touch_down)
return;
prox = 0;
prox = false;
}

wacom_wac->hid_data.num_received++;

@@ -3574,8 +3574,6 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
{
struct wacom_features *features = &wacom_wac->features;

input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);

if (!(features->device_type & WACOM_DEVICETYPE_PEN))
return -ENODEV;

@@ -3590,6 +3588,7 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
return 0;
}

input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
__set_bit(BTN_TOUCH, input_dev->keybit);
__set_bit(ABS_MISC, input_dev->absbit);

@@ -3742,8 +3741,6 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
{
struct wacom_features *features = &wacom_wac->features;

input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);

if (!(features->device_type & WACOM_DEVICETYPE_TOUCH))
return -ENODEV;

@@ -3756,6 +3753,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
/* setup has already been done */
return 0;

input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
__set_bit(BTN_TOUCH, input_dev->keybit);

if (features->touch_max == 1) {
@@ -129,6 +129,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK)
!= DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH) {
dev_err(dev->dev, "High Speed not supported!\n");
t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
dev->master_cfg &= ~DW_IC_CON_SPEED_MASK;
dev->master_cfg |= DW_IC_CON_SPEED_FAST;
dev->hs_hcnt = 0;

@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/**
/*
* i2c-exynos5.c - Samsung Exynos5 I2C Controller Driver
*
* Copyright (C) 2013 Samsung Electronics Co., Ltd.

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2014 Linaro Ltd.
* Copyright (c) 2014 Hisilicon Limited.
* Copyright (c) 2014 HiSilicon Limited.
*
* Now only support 7 bit address.
*/

@@ -525,8 +525,8 @@ static irqreturn_t jz4780_i2c_irq(int irqno, void *dev_id)
i2c_sta = jz4780_i2c_readw(i2c, JZ4780_I2C_STA);
data = *i2c->wbuf;
data &= ~JZ4780_I2C_DC_READ;
if ((!i2c->stop_hold) && (i2c->cdata->version >=
ID_X1000))
if ((i2c->wt_len == 1) && (!i2c->stop_hold) &&
(i2c->cdata->version >= ID_X1000))
data |= X1000_I2C_DC_STOP;
jz4780_i2c_writew(i2c, JZ4780_I2C_DC, data);
i2c->wbuf++;

@@ -534,7 +534,7 @@ static void stm32f4_i2c_handle_rx_addr(struct stm32f4_i2c_dev *i2c_dev)
default:
/*
* N-byte reception:
* Enable ACK, reset POS (ACK postion) and clear ADDR flag.
* Enable ACK, reset POS (ACK position) and clear ADDR flag.
* In that way, ACK will be sent as soon as the current byte
* will be received in the shift register
*/

@@ -378,7 +378,7 @@ static int i2c_gpio_init_recovery(struct i2c_adapter *adap)
static int i2c_init_recovery(struct i2c_adapter *adap)
{
struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
char *err_str;
char *err_str, *err_level = KERN_ERR;

if (!bri)
return 0;

@@ -387,7 +388,8 @@ static int i2c_init_recovery(struct i2c_adapter *adap)
return -EPROBE_DEFER;

if (!bri->recover_bus) {
err_str = "no recover_bus() found";
err_str = "no suitable method provided";
err_level = KERN_DEBUG;
goto err;
}

@@ -414,7 +415,7 @@ static int i2c_init_recovery(struct i2c_adapter *adap)

return 0;
err:
dev_err(&adap->dev, "Not using recovery: %s\n", err_str);
dev_printk(err_level, &adap->dev, "Not using recovery: %s\n", err_str);
adap->bus_recovery_info = NULL;

return -EINVAL;

@@ -252,8 +252,8 @@ static int __init n64joy_probe(struct platform_device *pdev)
mutex_init(&priv->n64joy_mutex);

priv->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (!priv->reg_base) {
err = -EINVAL;
if (IS_ERR(priv->reg_base)) {
err = PTR_ERR(priv->reg_base);
goto fail;
}
@@ -93,9 +93,15 @@ static irqreturn_t nspire_keypad_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}

static int nspire_keypad_chip_init(struct nspire_keypad *keypad)
static int nspire_keypad_open(struct input_dev *input)
{
struct nspire_keypad *keypad = input_get_drvdata(input);
unsigned long val = 0, cycles_per_us, delay_cycles, row_delay_cycles;
int error;

error = clk_prepare_enable(keypad->clk);
if (error)
return error;

cycles_per_us = (clk_get_rate(keypad->clk) / 1000000);
if (cycles_per_us == 0)

@@ -121,30 +127,6 @@ static int nspire_keypad_chip_init(struct nspire_keypad *keypad)
keypad->int_mask = 1 << 1;
writel(keypad->int_mask, keypad->reg_base + KEYPAD_INTMSK);

/* Disable GPIO interrupts to prevent hanging on touchpad */
/* Possibly used to detect touchpad events */
writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT);
/* Acknowledge existing interrupts */
writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS);

return 0;
}

static int nspire_keypad_open(struct input_dev *input)
{
struct nspire_keypad *keypad = input_get_drvdata(input);
int error;

error = clk_prepare_enable(keypad->clk);
if (error)
return error;

error = nspire_keypad_chip_init(keypad);
if (error) {
clk_disable_unprepare(keypad->clk);
return error;
}

return 0;
}

@@ -152,6 +134,11 @@ static void nspire_keypad_close(struct input_dev *input)
{
struct nspire_keypad *keypad = input_get_drvdata(input);

/* Disable interrupts */
writel(0, keypad->reg_base + KEYPAD_INTMSK);
/* Acknowledge existing interrupts */
writel(~0, keypad->reg_base + KEYPAD_INT);

clk_disable_unprepare(keypad->clk);
}

@@ -210,6 +197,25 @@ static int nspire_keypad_probe(struct platform_device *pdev)
return -ENOMEM;
}

error = clk_prepare_enable(keypad->clk);
if (error) {
dev_err(&pdev->dev, "failed to enable clock\n");
return error;
}

/* Disable interrupts */
writel(0, keypad->reg_base + KEYPAD_INTMSK);
/* Acknowledge existing interrupts */
writel(~0, keypad->reg_base + KEYPAD_INT);

/* Disable GPIO interrupts to prevent hanging on touchpad */
/* Possibly used to detect touchpad events */
writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT);
/* Acknowledge existing GPIO interrupts */
writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS);

clk_disable_unprepare(keypad->clk);

input_set_drvdata(input, keypad);

input->id.bustype = BUS_HOST;
@@ -588,6 +588,7 @@ static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
},
}, {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */

@@ -1441,7 +1441,7 @@ static int elants_i2c_probe(struct i2c_client *client,

touchscreen_parse_properties(ts->input, true, &ts->prop);

if (ts->chip_id == EKTF3624) {
if (ts->chip_id == EKTF3624 && ts->phy_x && ts->phy_y) {
/* calculate resolution from size */
ts->x_res = DIV_ROUND_CLOSEST(ts->prop.max_x, ts->phy_x);
ts->y_res = DIV_ROUND_CLOSEST(ts->prop.max_y, ts->phy_y);

@@ -1449,8 +1449,7 @@ static int elants_i2c_probe(struct i2c_client *client,

input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->x_res);
input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->y_res);
if (ts->major_res > 0)
input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, ts->major_res);
input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, ts->major_res);

error = input_mt_init_slots(ts->input, MAX_CONTACT_NUM,
INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);

@@ -145,8 +145,8 @@ static void s6sy761_report_coordinates(struct s6sy761_data *sdata,
u8 major = event[4];
u8 minor = event[5];
u8 z = event[6] & S6SY761_MASK_Z;
u16 x = (event[1] << 3) | ((event[3] & S6SY761_MASK_X) >> 4);
u16 y = (event[2] << 3) | (event[3] & S6SY761_MASK_Y);
u16 x = (event[1] << 4) | ((event[3] & S6SY761_MASK_X) >> 4);
u16 y = (event[2] << 4) | (event[3] & S6SY761_MASK_Y);

input_mt_slot(sdata->input, tid);

@@ -2,7 +2,7 @@
/*
* CZ.NIC's Turris Omnia LEDs driver
*
* 2020 by Marek Behun <marek.behun@nic.cz>
* 2020 by Marek Behún <kabel@kernel.org>
*/

#include <linux/i2c.h>

@@ -287,6 +287,6 @@ static struct i2c_driver omnia_leds_driver = {

module_i2c_driver(omnia_leds_driver);

MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
MODULE_DESCRIPTION("CZ.NIC's Turris Omnia LEDs");
MODULE_LICENSE("GPL v2");

@@ -2,7 +2,7 @@
/*
* rWTM BIU Mailbox driver for Armada 37xx
*
* Author: Marek Behun <marek.behun@nic.cz>
* Author: Marek Behún <kabel@kernel.org>
*/

#include <linux/device.h>

@@ -203,4 +203,4 @@ module_platform_driver(armada_37xx_mbox_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("rWTM BIU Mailbox driver for Armada 37xx");
MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
@@ -65,7 +65,7 @@ static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
u8 *res;

position = (index + rsb) * v->fec->roots;
block = div64_u64_rem(position, v->fec->roots << SECTOR_SHIFT, &rem);
block = div64_u64_rem(position, v->fec->io_size, &rem);
*offset = (unsigned)rem;

res = dm_bufio_read(v->fec->bufio, block, buf);

@@ -154,7 +154,7 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,

/* read the next block when we run out of parity bytes */
offset += v->fec->roots;
if (offset >= v->fec->roots << SECTOR_SHIFT) {
if (offset >= v->fec->io_size) {
dm_bufio_release(buf);

par = fec_read_parity(v, rsb, block_offset, &offset, &buf);

@@ -742,8 +742,13 @@ int verity_fec_ctr(struct dm_verity *v)
return -E2BIG;
}

if ((f->roots << SECTOR_SHIFT) & ((1 << v->data_dev_block_bits) - 1))
f->io_size = 1 << v->data_dev_block_bits;
else
f->io_size = v->fec->roots << SECTOR_SHIFT;

f->bufio = dm_bufio_client_create(f->dev->bdev,
f->roots << SECTOR_SHIFT,
f->io_size,
1, 0, NULL, NULL);
if (IS_ERR(f->bufio)) {
ti->error = "Cannot initialize FEC bufio client";

@@ -36,6 +36,7 @@ struct dm_verity_fec {
struct dm_dev *dev; /* parity data device */
struct dm_bufio_client *data_bufio; /* for data dev access */
struct dm_bufio_client *bufio; /* for parity data access */
size_t io_size; /* IO size for roots */
sector_t start; /* parity data start in blocks */
sector_t blocks; /* number of blocks covered */
sector_t rounds; /* number of interleaving rounds */

@@ -488,8 +488,8 @@ static int mtk_nfc_exec_instr(struct nand_chip *chip,
return 0;
case NAND_OP_WAITRDY_INSTR:
return readl_poll_timeout(nfc->regs + NFI_STA, status,
status & STA_BUSY, 20,
instr->ctx.waitrdy.timeout_ms);
!(status & STA_BUSY), 20,
instr->ctx.waitrdy.timeout_ms * 1000);
default:
break;
}

@@ -3161,10 +3161,17 @@ out_resources:
return err;
}

/* prod_id for switch families which do not have a PHY model number */
static const u16 family_prod_id_table[] = {
[MV88E6XXX_FAMILY_6341] = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
[MV88E6XXX_FAMILY_6390] = MV88E6XXX_PORT_SWITCH_ID_PROD_6390,
};

static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
{
struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
struct mv88e6xxx_chip *chip = mdio_bus->chip;
u16 prod_id;
u16 val;
int err;

@@ -3175,23 +3182,12 @@ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
err = chip->info->ops->phy_read(chip, bus, phy, reg, &val);
mv88e6xxx_reg_unlock(chip);

if (reg == MII_PHYSID2) {
/* Some internal PHYs don't have a model number. */
if (chip->info->family != MV88E6XXX_FAMILY_6165)
/* Then there is the 6165 family. It gets is
* PHYs correct. But it can also have two
* SERDES interfaces in the PHY address
* space. And these don't have a model
* number. But they are not PHYs, so we don't
* want to give them something a PHY driver
* will recognise.
*
* Use the mv88e6390 family model number
* instead, for anything which really could be
* a PHY,
*/
if (!(val & 0x3f0))
val |= MV88E6XXX_PORT_SWITCH_ID_PROD_6390 >> 4;
/* Some internal PHYs don't have a model number. */
if (reg == MII_PHYSID2 && !(val & 0x3f0) &&
chip->info->family < ARRAY_SIZE(family_prod_id_table)) {
prod_id = family_prod_id_table[chip->info->family];
if (prod_id)
val |= prod_id >> 4;
}

return err ? err : val;
@@ -3946,6 +3946,7 @@ static int macb_init(struct platform_device *pdev)
reg = gem_readl(bp, DCFG8);
bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
GEM_BFEXT(T2SCR, reg));
INIT_LIST_HEAD(&bp->rx_fs_list.list);
if (bp->max_tuples > 0) {
/* also needs one ethtype match to check IPv4 */
if (GEM_BFEXT(SCR2ETH, reg) > 0) {

@@ -3956,7 +3957,6 @@ static int macb_init(struct platform_device *pdev)
/* Filtering is supported in hw but don't enable it in kernel now */
dev->hw_features |= NETIF_F_NTUPLE;
/* init Rx flow definitions */
INIT_LIST_HEAD(&bp->rx_fs_list.list);
bp->rx_fs_list.count = 0;
spin_lock_init(&bp->rx_fs_lock);
} else

@@ -412,7 +412,7 @@
| CN6XXX_INTR_M0UNWI_ERR \
| CN6XXX_INTR_M1UPB0_ERR \
| CN6XXX_INTR_M1UPWI_ERR \
| CN6XXX_INTR_M1UPB0_ERR \
| CN6XXX_INTR_M1UNB0_ERR \
| CN6XXX_INTR_M1UNWI_ERR \
| CN6XXX_INTR_INSTR_DB_OF_ERR \
| CN6XXX_INTR_SLIST_DB_OF_ERR \
@@ -349,18 +349,6 @@ static int chcr_set_tcb_field(struct chcr_ktls_info *tx_info, u16 word,
return cxgb4_ofld_send(tx_info->netdev, skb);
}

/*
* chcr_ktls_mark_tcb_close: mark tcb state to CLOSE
* @tx_info - driver specific tls info.
* return: NET_TX_OK/NET_XMIT_DROP.
*/
static int chcr_ktls_mark_tcb_close(struct chcr_ktls_info *tx_info)
{
return chcr_set_tcb_field(tx_info, TCB_T_STATE_W,
TCB_T_STATE_V(TCB_T_STATE_M),
CHCR_TCB_STATE_CLOSED, 1);
}

/*
* chcr_ktls_dev_del: call back for tls_dev_del.
* Remove the tid and l2t entry and close the connection.

@@ -395,8 +383,6 @@ static void chcr_ktls_dev_del(struct net_device *netdev,

/* clear tid */
if (tx_info->tid != -1) {
/* clear tcb state and then release tid */
chcr_ktls_mark_tcb_close(tx_info);
cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
tx_info->tid, tx_info->ip_family);
}

@@ -574,7 +560,6 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
return 0;

free_tid:
chcr_ktls_mark_tcb_close(tx_info);
#if IS_ENABLED(CONFIG_IPV6)
/* clear clip entry */
if (tx_info->ip_family == AF_INET6)

@@ -672,10 +657,6 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
if (tx_info->pending_close) {
spin_unlock(&tx_info->lock);
if (!status) {
/* it's a late success, tcb status is established,
* mark it close.
*/
chcr_ktls_mark_tcb_close(tx_info);
cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
tid, tx_info->ip_family);
}

@@ -1663,54 +1644,6 @@ static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
refcount_add(nskb->truesize, &nskb->sk->sk_wmem_alloc);
}

/*
* chcr_ktls_update_snd_una: Reset the SEND_UNA. It will be done to avoid
* sending the same segment again. It will discard the segment which is before
* the current tx max.
* @tx_info - driver specific tls info.
* @q - TX queue.
* return: NET_TX_OK/NET_XMIT_DROP.
*/
static int chcr_ktls_update_snd_una(struct chcr_ktls_info *tx_info,
struct sge_eth_txq *q)
{
struct fw_ulptx_wr *wr;
unsigned int ndesc;
int credits;
void *pos;
u32 len;

len = sizeof(*wr) + roundup(CHCR_SET_TCB_FIELD_LEN, 16);
ndesc = DIV_ROUND_UP(len, 64);

credits = chcr_txq_avail(&q->q) - ndesc;
if (unlikely(credits < 0)) {
chcr_eth_txq_stop(q);
return NETDEV_TX_BUSY;
}

pos = &q->q.desc[q->q.pidx];

wr = pos;
/* ULPTX wr */
wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
wr->cookie = 0;
/* fill len in wr field */
wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));

pos += sizeof(*wr);

pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
TCB_SND_UNA_RAW_W,
TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M),
TCB_SND_UNA_RAW_V(0), 0);

chcr_txq_advance(&q->q, ndesc);
cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);

return 0;
}

/*
* chcr_end_part_handler: This handler will handle the record which
* is complete or if record's end part is received. T6 adapter has a issue that

@@ -1735,7 +1668,9 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
struct sge_eth_txq *q, u32 skb_offset,
u32 tls_end_offset, bool last_wr)
{
bool free_skb_if_tx_fails = false;
struct sk_buff *nskb = NULL;

/* check if it is a complete record */
if (tls_end_offset == record->len) {
nskb = skb;

@@ -1758,6 +1693,8 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,

if (last_wr)
dev_kfree_skb_any(skb);
else
free_skb_if_tx_fails = true;

last_wr = true;

@@ -1769,6 +1706,8 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
record->num_frags,
(last_wr && tcp_push_no_fin),
mss)) {
if (free_skb_if_tx_fails)
dev_kfree_skb_any(skb);
goto out;
}
tx_info->prev_seq = record->end_seq;

@@ -1905,11 +1844,6 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
/* reset tcp_seq as per the prior_data_required len */
tcp_seq -= prior_data_len;
}
/* reset snd una, so the middle record won't send the already
* sent part.
*/
if (chcr_ktls_update_snd_una(tx_info, q))
goto out;
atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_middle_pkts);
} else {
atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_start_pkts);

@@ -2010,12 +1944,11 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
* we will send the complete record again.
*/

spin_lock_irqsave(&tx_ctx->base.lock, flags);

do {
int i;

cxgb4_reclaim_completed_tx(adap, &q->q, true);
/* lock taken */
spin_lock_irqsave(&tx_ctx->base.lock, flags);
/* fetch the tls record */
record = tls_get_record(&tx_ctx->base, tcp_seq,
&tx_info->record_no);

@@ -2074,11 +2007,11 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
tls_end_offset, skb_offset,
0);

spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
if (ret) {
/* free the refcount taken earlier */
if (tls_end_offset < data_len)
dev_kfree_skb_any(skb);
spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
goto out;
}

@@ -2088,16 +2021,6 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
continue;
}

/* increase page reference count of the record, so that there
* won't be any chance of page free in middle if in case stack
* receives ACK and try to delete the record.
*/
for (i = 0; i < record->num_frags; i++)
__skb_frag_ref(&record->frags[i]);
/* lock cleared */
spin_unlock_irqrestore(&tx_ctx->base.lock, flags);

/* if a tls record is finishing in this SKB */
if (tls_end_offset <= data_len) {
ret = chcr_end_part_handler(tx_info, skb, record,

@@ -2122,13 +2045,9 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
data_len = 0;
}

/* clear the frag ref count which increased locally before */
for (i = 0; i < record->num_frags; i++) {
/* clear the frag ref count */
__skb_frag_unref(&record->frags[i]);
}
/* if any failure, come out from the loop. */
if (ret) {
spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
if (th->fin)
dev_kfree_skb_any(skb);

@@ -2143,6 +2062,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)

} while (data_len > 0);

spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes);
@@ -1469,8 +1469,10 @@ dm9000_probe(struct platform_device *pdev)

/* Init network device */
ndev = alloc_etherdev(sizeof(struct board_info));
if (!ndev)
return -ENOMEM;
if (!ndev) {
ret = -ENOMEM;
goto out_regulator_disable;
}

SET_NETDEV_DEV(ndev, &pdev->dev);

@@ -1173,19 +1173,13 @@ static int __ibmvnic_open(struct net_device *netdev)

rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
if (rc) {
for (i = 0; i < adapter->req_rx_queues; i++)
napi_disable(&adapter->napi[i]);
ibmvnic_napi_disable(adapter);
release_resources(adapter);
return rc;
}

netif_tx_start_all_queues(netdev);

if (prev_state == VNIC_CLOSED) {
for (i = 0; i < adapter->req_rx_queues; i++)
napi_schedule(&adapter->napi[i]);
}

adapter->state = VNIC_OPEN;
return rc;
}

@@ -1967,7 +1961,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
u64 old_num_rx_queues, old_num_tx_queues;
u64 old_num_rx_slots, old_num_tx_slots;
struct net_device *netdev = adapter->netdev;
int i, rc;
int rc;

netdev_dbg(adapter->netdev,
"[S:%s FOP:%d] Reset reason: %s, reset_state: %s\n",

@@ -2158,10 +2152,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
/* refresh device's multicast list */
ibmvnic_set_multi(netdev);

/* kick napi */
for (i = 0; i < adapter->req_rx_queues; i++)
napi_schedule(&adapter->napi[i]);

if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
adapter->reset_reason == VNIC_RESET_MOBILITY)
__netdev_notify_peers(netdev);

@@ -3256,9 +3246,6 @@ restart_loop:

next = ibmvnic_next_scrq(adapter, scrq);
for (i = 0; i < next->tx_comp.num_comps; i++) {
if (next->tx_comp.rcs[i])
dev_err(dev, "tx error %x\n",
next->tx_comp.rcs[i]);
index = be32_to_cpu(next->tx_comp.correlators[i]);
if (index & IBMVNIC_TSO_POOL_MASK) {
tx_pool = &adapter->tso_pool[pool];

@@ -3272,7 +3259,13 @@ restart_loop:
num_entries += txbuff->num_entries;
if (txbuff->skb) {
total_bytes += txbuff->skb->len;
dev_consume_skb_irq(txbuff->skb);
if (next->tx_comp.rcs[i]) {
dev_err(dev, "tx error %x\n",
next->tx_comp.rcs[i]);
dev_kfree_skb_irq(txbuff->skb);
} else {
dev_consume_skb_irq(txbuff->skb);
}
txbuff->skb = NULL;
} else {
netdev_warn(adapter->netdev,
@@ -12357,6 +12357,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
{
int err = 0;
int size;
u16 pow;

/* Set default capability flags */
pf->flags = I40E_FLAG_RX_CSUM_ENABLED |

@@ -12375,6 +12376,11 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->rss_table_size = pf->hw.func_caps.rss_table_size;
pf->rss_size_max = min_t(int, pf->rss_size_max,
pf->hw.func_caps.num_tx_qp);

/* find the next higher power-of-2 of num cpus */
pow = roundup_pow_of_two(num_online_cpus());
pf->rss_size_max = min_t(int, pf->rss_size_max, pow);

if (pf->hw.func_caps.rss) {
pf->flags |= I40E_FLAG_RSS_ENABLED;
pf->alloc_rss_size = min_t(int, pf->rss_size_max,
Some files were not shown because too many files have changed in this diff.