commit ba131a89d7

    Merge linux 6.6.57

    Conflicts:
        drivers/scsi/sd.c

    Signed-off-by: Jianping Liu <frankjpliu@tencent.com>
@@ -255,9 +255,21 @@ Contributing new tests (details)
 
 TEST_PROGS_EXTENDED, TEST_GEN_PROGS_EXTENDED mean it is the
 executable which is not tested by default.
+
 TEST_FILES, TEST_GEN_FILES mean it is the file which is used by
 test.
 
+TEST_INCLUDES is similar to TEST_FILES, it lists files which should be
+included when exporting or installing the tests, with the following
+differences:
+
+ * symlinks to files in other directories are preserved
+ * the part of paths below tools/testing/selftests/ is preserved when
+   copying the files to the output directory
+
+TEST_INCLUDES is meant to list dependencies located in other directories of
+the selftests hierarchy.
+
 * First use the headers inside the kernel source and/or git repo, and then the
   system headers.  Headers for the kernel release as opposed to headers
   installed by the distro on the system should be the primary focus to be able
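A minimal illustration of how the new variable is used in a selftest Makefile — the directory and header names below are hypothetical, not taken from this patch:

    # tools/testing/selftests/foo/Makefile (illustrative)
    TEST_GEN_PROGS := foo_test
    # a header shared from another selftests directory; it is exported and
    # installed with the test, keeping its path below tools/testing/selftests/
    TEST_INCLUDES := ../bar/bar_helpers.h
    include ../lib.mk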
--- a/Makefile
+++ b/Makefile
@@ -8,7 +8,7 @@ else
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 6
-SUBLEVEL = 56
+SUBLEVEL = 57
 EXTRAVERSION =
 NAME = Pinguïn Aangedreven
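These four variables combine into the release string, so after this hunk `make kernelversion` reports 6.6.57 instead of 6.6.56.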
@@ -374,6 +374,7 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
 	if (bus) {
+		memcpy(bus->sysdata, info->cfg, sizeof(struct pci_config_window));
 		kfree(info);
 		kfree(root_ops);
 	} else {
 		struct pci_bus *child;
 
@@ -24,7 +24,6 @@ CONFIG_PS3_VRAM=m
 CONFIG_PS3_LPM=m
 # CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
 CONFIG_KEXEC=y
-# CONFIG_PPC64_BIG_ENDIAN_ELF_ABI_V2 is not set
 CONFIG_PPC_4K_PAGES=y
 CONFIG_SCHED_SMT=y
 CONFIG_PM=y
@@ -327,6 +327,8 @@ static inline int sbi_remote_fence_i(const struct cpumask *cpu_mask) { return -1
 static inline void sbi_init(void) {}
 #endif /* CONFIG_RISCV_SBI */
 
+unsigned long riscv_get_mvendorid(void);
+unsigned long riscv_get_marchid(void);
 unsigned long riscv_cached_mvendorid(unsigned int cpu_id);
 unsigned long riscv_cached_marchid(unsigned int cpu_id);
 unsigned long riscv_cached_mimpid(unsigned int cpu_id);
@@ -7,7 +7,7 @@
 #ifdef CONFIG_64BIT
 #define MAX_PHYSMEM_BITS	56
 #else
-#define MAX_PHYSMEM_BITS	34
+#define MAX_PHYSMEM_BITS	32
 #endif /* CONFIG_64BIT */
 #define SECTION_SIZE_BITS	27
 #endif /* CONFIG_SPARSEMEM */
@@ -33,7 +33,6 @@
 
 #define THREAD_SHIFT		(PAGE_SHIFT + THREAD_SIZE_ORDER)
 #define OVERFLOW_STACK_SIZE	SZ_4K
-#define SHADOW_OVERFLOW_STACK_SIZE (1024)
 
 #define IRQ_STACK_SIZE		THREAD_SIZE
 
@@ -139,6 +139,34 @@ int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid)
 	return -1;
 }
 
+unsigned long __init riscv_get_marchid(void)
+{
+	struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo);
+
+#if IS_ENABLED(CONFIG_RISCV_SBI)
+	ci->marchid = sbi_spec_is_0_1() ? 0 : sbi_get_marchid();
+#elif IS_ENABLED(CONFIG_RISCV_M_MODE)
+	ci->marchid = csr_read(CSR_MARCHID);
+#else
+	ci->marchid = 0;
+#endif
+	return ci->marchid;
+}
+
+unsigned long __init riscv_get_mvendorid(void)
+{
+	struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo);
+
+#if IS_ENABLED(CONFIG_RISCV_SBI)
+	ci->mvendorid = sbi_spec_is_0_1() ? 0 : sbi_get_mvendorid();
+#elif IS_ENABLED(CONFIG_RISCV_M_MODE)
+	ci->mvendorid = csr_read(CSR_MVENDORID);
+#else
+	ci->mvendorid = 0;
+#endif
+	return ci->mvendorid;
+}
+
 DEFINE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
 
 unsigned long riscv_cached_mvendorid(unsigned int cpu_id)

@@ -170,12 +198,16 @@ static int riscv_cpuinfo_starting(unsigned int cpu)
 	struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo);
 
 #if IS_ENABLED(CONFIG_RISCV_SBI)
-	ci->mvendorid = sbi_spec_is_0_1() ? 0 : sbi_get_mvendorid();
-	ci->marchid = sbi_spec_is_0_1() ? 0 : sbi_get_marchid();
+	if (!ci->mvendorid)
+		ci->mvendorid = sbi_spec_is_0_1() ? 0 : sbi_get_mvendorid();
+	if (!ci->marchid)
+		ci->marchid = sbi_spec_is_0_1() ? 0 : sbi_get_marchid();
 	ci->mimpid = sbi_spec_is_0_1() ? 0 : sbi_get_mimpid();
 #elif IS_ENABLED(CONFIG_RISCV_M_MODE)
-	ci->mvendorid = csr_read(CSR_MVENDORID);
-	ci->marchid = csr_read(CSR_MARCHID);
+	if (!ci->mvendorid)
+		ci->mvendorid = csr_read(CSR_MVENDORID);
+	if (!ci->marchid)
+		ci->marchid = csr_read(CSR_MARCHID);
 	ci->mimpid = csr_read(CSR_MIMPID);
 #else
 	ci->mvendorid = 0;
@@ -351,6 +351,8 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap)
 	struct acpi_table_header *rhct;
 	acpi_status status;
 	unsigned int cpu;
+	u64 boot_vendorid;
+	u64 boot_archid;
 
 	if (!acpi_disabled) {
 		status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct);

@@ -358,6 +360,9 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap)
 			return;
 	}
 
+	boot_vendorid = riscv_get_mvendorid();
+	boot_archid = riscv_get_marchid();
+
 	for_each_possible_cpu(cpu) {
 		struct riscv_isainfo *isainfo = &hart_isa[cpu];
 		unsigned long this_hwcap = 0;

@@ -405,8 +410,7 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap)
 		 * CPU cores with the ratified spec will contain non-zero
 		 * marchid.
 		 */
-		if (acpi_disabled && riscv_cached_mvendorid(cpu) == THEAD_VENDOR_ID &&
-		    riscv_cached_marchid(cpu) == 0x0) {
+		if (acpi_disabled && boot_vendorid == THEAD_VENDOR_ID && boot_archid == 0x0) {
 			this_hwcap &= ~isa2hwcap[RISCV_ISA_EXT_v];
 			clear_bit(RISCV_ISA_EXT_v, isainfo->isa);
 		}
@@ -444,6 +444,12 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
 		*(u32 *)loc = CLEAN_IMM(CJTYPE, *(u32 *)loc) |
 			      ENCODE_CJTYPE_IMM(val - addr);
 		break;
+	case R_RISCV_ADD16:
+		*(u16 *)loc += val;
+		break;
+	case R_RISCV_SUB16:
+		*(u16 *)loc -= val;
+		break;
 	case R_RISCV_ADD32:
 		*(u32 *)loc += val;
 		break;
@@ -219,8 +219,8 @@ SYM_CODE_START(ret_from_fork)
 	jalr s0
 1:
 	move a0, sp /* pt_regs */
-	la ra, ret_from_exception
-	tail syscall_exit_to_user_mode
+	call syscall_exit_to_user_mode
+	j ret_from_exception
 SYM_CODE_END(ret_from_fork)
 
 /*
@@ -60,8 +60,10 @@ static inline int test_facility(unsigned long nr)
 	unsigned long facilities_als[] = { FACILITIES_ALS };
 
 	if (__builtin_constant_p(nr) && nr < sizeof(facilities_als) * 8) {
-		if (__test_facility(nr, &facilities_als))
-			return 1;
+		if (__test_facility(nr, &facilities_als)) {
+			if (!__is_defined(__DECOMPRESSOR))
+				return 1;
+		}
 	}
 	return __test_facility(nr, &stfle_fac_list);
 }
@@ -16,8 +16,10 @@
 #include <asm/pci_io.h>
 
 #define xlate_dev_mem_ptr xlate_dev_mem_ptr
+#define kc_xlate_dev_mem_ptr xlate_dev_mem_ptr
 void *xlate_dev_mem_ptr(phys_addr_t phys);
 #define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
+#define kc_unxlate_dev_mem_ptr unxlate_dev_mem_ptr
 void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
 
 #define IO_SPACE_LIMIT 0
@@ -1463,7 +1463,7 @@ static int aux_output_begin(struct perf_output_handle *handle,
 	unsigned long range, i, range_scan, idx, head, base, offset;
 	struct hws_trailer_entry *te;
 
-	if (WARN_ON_ONCE(handle->head & ~PAGE_MASK))
+	if (handle->head & ~PAGE_MASK)
 		return -EINVAL;
 
 	aux->head = handle->head >> PAGE_SHIFT;

@@ -1642,7 +1642,7 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
 	unsigned long num_sdb;
 
 	aux = perf_get_aux(handle);
-	if (WARN_ON_ONCE(!aux))
+	if (!aux)
 		return;
 
 	/* Inform user space new data arrived */

@@ -1661,7 +1661,7 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
 				    num_sdb);
 			break;
 		}
-		if (WARN_ON_ONCE(!aux))
+		if (!aux)
 			return;
 
 		/* Update head and alert_mark to new position */

@@ -1896,12 +1896,8 @@ static void cpumsf_pmu_start(struct perf_event *event, int flags)
 {
 	struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
 
-	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+	if (!(event->hw.state & PERF_HES_STOPPED))
 		return;
-
-	if (flags & PERF_EF_RELOAD)
-		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
-
 	perf_pmu_disable(event->pmu);
 	event->hw.state = 0;
 	cpuhw->lsctl.cs = 1;
@@ -95,11 +95,12 @@ static long cmm_alloc_pages(long nr, long *counter,
 		(*counter)++;
 		spin_unlock(&cmm_lock);
 		nr--;
+		cond_resched();
 	}
 	return nr;
 }
 
-static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
+static long __cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
 {
 	struct cmm_page_array *pa;
 	unsigned long addr;

@@ -123,6 +124,21 @@ static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
 	return nr;
 }
 
+static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
+{
+	long inc = 0;
+
+	while (nr) {
+		inc = min(256L, nr);
+		nr -= inc;
+		inc = __cmm_free_pages(inc, counter, list);
+		if (inc)
+			break;
+		cond_resched();
+	}
+	return nr + inc;
+}
+
 static int cmm_oom_notify(struct notifier_block *self,
 			  unsigned long dummy, void *parm)
 {
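The wrapper added above bounds each batch at 256 pages and calls cond_resched() between batches, so releasing a large number of pages no longer monopolizes the CPU. The same chunk-and-yield pattern in a generic, illustrative form (the helper name is hypothetical, not from this patch):

    /* Illustrative sketch only: drain 'nr' items in bounded chunks. */
    static long drain_all(long nr)
    {
    	long left = 0;

    	while (nr) {
    		long chunk = min(256L, nr);

    		nr -= chunk;
    		left = drain_chunk(chunk);	/* hypothetical helper; returns items it failed to drain */
    		if (left)
    			break;			/* stop on partial progress, report the remainder */
    		cond_resched();			/* give other tasks a chance between chunks */
    	}
    	return nr + left;			/* items still undrained */
    }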
@@ -26,6 +26,7 @@
 #define PCI_DEVICE_ID_AMD_19H_M70H_ROOT		0x14e8
 #define PCI_DEVICE_ID_AMD_1AH_M00H_ROOT		0x153a
 #define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT		0x1507
+#define PCI_DEVICE_ID_AMD_1AH_M60H_ROOT		0x1122
 #define PCI_DEVICE_ID_AMD_MI200_ROOT		0x14bb
 
 #define PCI_DEVICE_ID_AMD_17H_DF_F4		0x1464

@@ -70,6 +71,7 @@ static const struct pci_device_id amd_root_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_ROOT) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_ROOT) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_ROOT) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_ROOT) },
 	{}
 };

@@ -101,6 +103,8 @@ static const struct pci_device_id amd_nb_misc_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_DF_F3) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M70H_DF_F3) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F3) },
 	{}
 };
@@ -58,6 +58,56 @@ static bool is_imm8(int value)
 	return value <= 127 && value >= -128;
 }
 
+/*
+ * Let us limit the positive offset to be <= 123.
+ * This is to ensure eventual jit convergence For the following patterns:
+ * ...
+ *    pass4, final_proglen=4391:
+ *      ...
+ *      20e:    48 85 ff                test   rdi,rdi
+ *      211:    74 7d                   je     0x290
+ *      213:    48 8b 77 00             mov    rsi,QWORD PTR [rdi+0x0]
+ *      ...
+ *      289:    48 85 ff                test   rdi,rdi
+ *      28c:    74 17                   je     0x2a5
+ *      28e:    e9 7f ff ff ff          jmp    0x212
+ *      293:    bf 03 00 00 00          mov    edi,0x3
+ *    Note that insn at 0x211 is 2-byte cond jump insn for offset 0x7d (-125)
+ *    and insn at 0x28e is 5-byte jmp insn with offset -129.
+ *
+ *    pass5, final_proglen=4392:
+ *      ...
+ *      20e:    48 85 ff                test   rdi,rdi
+ *      211:    0f 84 80 00 00 00       je     0x297
+ *      217:    48 8b 77 00             mov    rsi,QWORD PTR [rdi+0x0]
+ *      ...
+ *      28d:    48 85 ff                test   rdi,rdi
+ *      290:    74 1a                   je     0x2ac
+ *      292:    eb 84                   jmp    0x218
+ *      294:    bf 03 00 00 00          mov    edi,0x3
+ *    Note that insn at 0x211 is 6-byte cond jump insn now since its offset
+ *    becomes 0x80 based on previous round (0x293 - 0x213 = 0x80).
+ *    At the same time, insn at 0x292 is a 2-byte insn since its offset is
+ *    -124.
+ *
+ * pass6 will repeat the same code as in pass4 and this will prevent
+ * eventual convergence.
+ *
+ * To fix this issue, we need to break je (2->6 bytes) <-> jmp (5->2 bytes)
+ * cycle in the above. In the above example je offset <= 0x7c should work.
+ *
+ * For other cases, je <-> je needs offset <= 0x7b to avoid no convergence
+ * issue. For jmp <-> je and jmp <-> jmp cases, jmp offset <= 0x7c should
+ * avoid no convergence issue.
+ *
+ * Overall, let us limit the positive offset for 8bit cond/uncond jmp insn
+ * to maximum 123 (0x7b). This way, the jit pass can eventually converge.
+ */
+static bool is_imm8_jmp_offset(int value)
+{
+	return value <= 123 && value >= -128;
+}
+
 static bool is_simm32(s64 value)
 {
 	return value == (s64)(s32)value;

@@ -1774,7 +1824,7 @@ emit_cond_jmp:		/* Convert BPF opcode to x86 */
 				return -EFAULT;
 			}
 			jmp_offset = addrs[i + insn->off] - addrs[i];
-			if (is_imm8(jmp_offset)) {
+			if (is_imm8_jmp_offset(jmp_offset)) {
 				if (jmp_padding) {
 					/* To keep the jmp_offset valid, the extra bytes are
 					 * padded before the jump insn, so we subtract the

@@ -1856,7 +1906,7 @@ emit_cond_jmp:		/* Convert BPF opcode to x86 */
 				break;
 			}
 emit_jmp:
-			if (is_imm8(jmp_offset)) {
+			if (is_imm8_jmp_offset(jmp_offset)) {
 				if (jmp_padding) {
 					/* To avoid breaking jmp_offset, the extra bytes
 					 * are padded before the actual jmp insn, so
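The arithmetic behind the 123 cap: an 8-bit displacement covers -128..127, but when a later pass grows an instruction elsewhere, a forward jump target can shift by up to 4 bytes (a 2-byte jmp growing to 5 bytes adds 3; a 2-byte je growing to 6 bytes adds 4). Capping the positive offset at 127 - 4 = 123 (0x7b) leaves enough headroom that a jump emitted in short form stays in short form on the next pass, so the passes reach a fixed point instead of oscillating as in the pass4/pass5 trace quoted in the comment.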
@@ -667,6 +667,87 @@ static int mobile_lpm_policy = -1;
 module_param(mobile_lpm_policy, int, 0644);
 MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
 
+static char *ahci_mask_port_map;
+module_param_named(mask_port_map, ahci_mask_port_map, charp, 0444);
+MODULE_PARM_DESC(mask_port_map,
+		 "32-bits port map masks to ignore controllers ports. "
+		 "Valid values are: "
+		 "\"<mask>\" to apply the same mask to all AHCI controller "
+		 "devices, and \"<pci_dev>=<mask>,<pci_dev>=<mask>,...\" to "
+		 "specify different masks for the controllers specified, "
+		 "where <pci_dev> is the PCI ID of an AHCI controller in the "
+		 "form \"domain:bus:dev.func\"");
+
+static void ahci_apply_port_map_mask(struct device *dev,
+				     struct ahci_host_priv *hpriv, char *mask_s)
+{
+	unsigned int mask;
+
+	if (kstrtouint(mask_s, 0, &mask)) {
+		dev_err(dev, "Invalid port map mask\n");
+		return;
+	}
+
+	hpriv->mask_port_map = mask;
+}
+
+static void ahci_get_port_map_mask(struct device *dev,
+				   struct ahci_host_priv *hpriv)
+{
+	char *param, *end, *str, *mask_s;
+	char *name;
+
+	if (!strlen(ahci_mask_port_map))
+		return;
+
+	str = kstrdup(ahci_mask_port_map, GFP_KERNEL);
+	if (!str)
+		return;
+
+	/* Handle single mask case */
+	if (!strchr(str, '=')) {
+		ahci_apply_port_map_mask(dev, hpriv, str);
+		goto free;
+	}
+
+	/*
+	 * Mask list case: parse the parameter to apply the mask only if
+	 * the device name matches.
+	 */
+	param = str;
+	end = param + strlen(param);
+	while (param && param < end && *param) {
+		name = param;
+		param = strchr(name, '=');
+		if (!param)
+			break;
+
+		*param = '\0';
+		param++;
+		if (param >= end)
+			break;
+
+		if (strcmp(dev_name(dev), name) != 0) {
+			param = strchr(param, ',');
+			if (param)
+				param++;
+			continue;
+		}
+
+		mask_s = param;
+		param = strchr(mask_s, ',');
+		if (param) {
+			*param = '\0';
+			param++;
+		}
+
+		ahci_apply_port_map_mask(dev, hpriv, mask_s);
+	}
+
+free:
+	kfree(str);
+}
+
 static void ahci_pci_save_initial_config(struct pci_dev *pdev,
 					 struct ahci_host_priv *hpriv)
 {

@@ -689,6 +770,10 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
 			 "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
 	}
 
+	/* Handle port map masks passed as module parameter. */
+	if (ahci_mask_port_map)
+		ahci_get_port_map_mask(&pdev->dev, hpriv);
+
 	ahci_save_initial_config(&pdev->dev, hpriv);
 }
 
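Usage note (values illustrative, not from this patch): the same mask can be applied globally, e.g. `modprobe ahci mask_port_map=0x1` to ignore port 0 on every AHCI controller, or per device, e.g. `ahci.mask_port_map=0000:00:17.0=0x1,0000:01:00.0=0x3` on the kernel command line. The 0444 permission means the parameter is read-only at runtime and must be set at module load or boot.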
@@ -4056,10 +4056,20 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
 
 	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
 
-	/* Set all devices attached to the port in standby mode */
-	ata_for_each_link(link, ap, HOST_FIRST) {
-		ata_for_each_dev(dev, link, ENABLED)
-			ata_dev_power_set_standby(dev);
+	/*
+	 * We will reach this point for all of the PM events:
+	 * PM_EVENT_SUSPEND (if runtime pm, PM_EVENT_AUTO will also be set)
+	 * PM_EVENT_FREEZE, and PM_EVENT_HIBERNATE.
+	 *
+	 * We do not want to perform disk spin down for PM_EVENT_FREEZE.
+	 * (Spin down will be performed by the subsequent PM_EVENT_HIBERNATE.)
+	 */
+	if (!(ap->pm_mesg.event & PM_EVENT_FREEZE)) {
+		/* Set all devices attached to the port in standby mode */
+		ata_for_each_link(link, ap, HOST_FIRST) {
+			ata_for_each_dev(dev, link, ENABLED)
+				ata_dev_power_set_standby(dev);
+		}
 	}
 
 	/*
@@ -152,7 +152,8 @@ static ssize_t bus_attr_show(struct kobject *kobj, struct attribute *attr,
 {
 	struct bus_attribute *bus_attr = to_bus_attr(attr);
 	struct subsys_private *subsys_priv = to_subsys_private(kobj);
-	ssize_t ret = 0;
+	/* return -EIO for reading a bus attribute without show() */
+	ssize_t ret = -EIO;
 
 	if (bus_attr->show)
 		ret = bus_attr->show(subsys_priv->bus, buf);

@@ -164,7 +165,8 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
 {
 	struct bus_attribute *bus_attr = to_bus_attr(attr);
 	struct subsys_private *subsys_priv = to_subsys_private(kobj);
-	ssize_t ret = 0;
+	/* return -EIO for writing a bus attribute without store() */
+	ssize_t ret = -EIO;
 
 	if (bus_attr->store)
 		ret = bus_attr->store(subsys_priv->bus, buf, count);

@@ -920,6 +922,8 @@ bus_devices_fail:
 	bus_remove_file(bus, &bus_attr_uevent);
bus_uevent_fail:
 	kset_unregister(&priv->subsys);
+	/* Above kset_unregister() will kfree @priv */
+	priv = NULL;
out:
 	kfree(priv);
 	return retval;
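With `ret` now defaulting to -EIO, reading or writing a bus attribute that lacks a show() or store() callback fails with EIO in userspace instead of succeeding as a zero-byte transfer — matching how the sysfs core already treats attributes without these callbacks.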
@@ -1983,6 +1983,13 @@ static void zram_destroy_comps(struct zram *zram)
 		zcomp_destroy(comp);
 		zram->num_active_comps--;
 	}
+
+	for (prio = ZRAM_PRIMARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
+		/* Do not free statically defined compression algorithms */
+		if (zram->comp_algs[prio] != default_compressor)
+			kfree(zram->comp_algs[prio]);
+		zram->comp_algs[prio] = NULL;
+	}
 }
 
 static void zram_reset_device(struct zram *zram)
@@ -159,6 +159,7 @@ struct mhi_ep_chan {
 	void (*xfer_cb)(struct mhi_ep_device *mhi_dev, struct mhi_result *result);
 	enum mhi_ch_state state;
 	enum dma_data_direction dir;
+	size_t rd_offset;
 	u64 tre_loc;
 	u32 tre_size;
 	u32 tre_bytes_left;
@@ -74,7 +74,7 @@ static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct m
 	struct mhi_ring_element *event;
 	int ret;
 
-	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
 	if (!event)
 		return -ENOMEM;
 

@@ -93,7 +93,7 @@ int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_stat
 	struct mhi_ring_element *event;
 	int ret;
 
-	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
 	if (!event)
 		return -ENOMEM;
 

@@ -111,7 +111,7 @@ int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_e
 	struct mhi_ring_element *event;
 	int ret;
 
-	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
 	if (!event)
 		return -ENOMEM;
 

@@ -130,7 +130,7 @@ static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_e
 	struct mhi_ring_element *event;
 	int ret;
 
-	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
 	if (!event)
 		return -ENOMEM;
 

@@ -183,6 +183,8 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele
 
 			goto err_unlock;
 		}
+
+		mhi_chan->rd_offset = ch_ring->rd_offset;
 	}
 
 	/* Set channel state to RUNNING */
@@ -312,21 +314,85 @@ bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_directio
 	struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
 	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
 
-	return !!(ring->rd_offset == ring->wr_offset);
+	return !!(mhi_chan->rd_offset == ring->wr_offset);
 }
 EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty);
 
+static void mhi_ep_read_completion(struct mhi_ep_buf_info *buf_info)
+{
+	struct mhi_ep_device *mhi_dev = buf_info->mhi_dev;
+	struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_ep_chan *mhi_chan = mhi_dev->ul_chan;
+	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
+	struct mhi_ring_element *el = &ring->ring_cache[ring->rd_offset];
+	struct mhi_result result = {};
+	int ret;
+
+	if (mhi_chan->xfer_cb) {
+		result.buf_addr = buf_info->cb_buf;
+		result.dir = mhi_chan->dir;
+		result.bytes_xferd = buf_info->size;
+
+		mhi_chan->xfer_cb(mhi_dev, &result);
+	}
+
+	/*
+	 * The host will split the data packet into multiple TREs if it can't fit
+	 * the packet in a single TRE. In that case, CHAIN flag will be set by the
+	 * host for all TREs except the last one.
+	 */
+	if (buf_info->code != MHI_EV_CC_OVERFLOW) {
+		if (MHI_TRE_DATA_GET_CHAIN(el)) {
+			/*
+			 * IEOB (Interrupt on End of Block) flag will be set by the host if
+			 * it expects the completion event for all TREs of a TD.
+			 */
+			if (MHI_TRE_DATA_GET_IEOB(el)) {
+				ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
+							     MHI_TRE_DATA_GET_LEN(el),
+							     MHI_EV_CC_EOB);
+				if (ret < 0) {
+					dev_err(&mhi_chan->mhi_dev->dev,
+						"Error sending transfer compl. event\n");
+					goto err_free_tre_buf;
+				}
+			}
+		} else {
+			/*
+			 * IEOT (Interrupt on End of Transfer) flag will be set by the host
+			 * for the last TRE of the TD and expects the completion event for
+			 * the same.
+			 */
+			if (MHI_TRE_DATA_GET_IEOT(el)) {
+				ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
+							     MHI_TRE_DATA_GET_LEN(el),
+							     MHI_EV_CC_EOT);
+				if (ret < 0) {
+					dev_err(&mhi_chan->mhi_dev->dev,
+						"Error sending transfer compl. event\n");
+					goto err_free_tre_buf;
+				}
+			}
+		}
+	}
+
+	mhi_ep_ring_inc_index(ring);
+
+err_free_tre_buf:
+	kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_info->cb_buf);
+}
+
 static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
-			       struct mhi_ep_ring *ring,
-			       struct mhi_result *result,
-			       u32 len)
+			       struct mhi_ep_ring *ring)
 {
 	struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
 	size_t tr_len, read_offset, write_offset;
 	struct mhi_ep_buf_info buf_info = {};
+	u32 len = MHI_EP_DEFAULT_MTU;
 	struct mhi_ring_element *el;
 	bool tr_done = false;
+	void *buf_addr;
 	u32 buf_left;
 	int ret;
 
@@ -339,7 +405,7 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
 		return -ENODEV;
 	}
 
-	el = &ring->ring_cache[ring->rd_offset];
+	el = &ring->ring_cache[mhi_chan->rd_offset];
 
 	/* Check if there is data pending to be read from previous read operation */
 	if (mhi_chan->tre_bytes_left) {

@@ -356,82 +422,50 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
 		read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
 		write_offset = len - buf_left;
 
+		buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL);
+		if (!buf_addr)
+			return -ENOMEM;
+
 		buf_info.host_addr = mhi_chan->tre_loc + read_offset;
-		buf_info.dev_addr = result->buf_addr + write_offset;
+		buf_info.dev_addr = buf_addr + write_offset;
 		buf_info.size = tr_len;
+		buf_info.cb = mhi_ep_read_completion;
+		buf_info.cb_buf = buf_addr;
+		buf_info.mhi_dev = mhi_chan->mhi_dev;
+
+		if (mhi_chan->tre_bytes_left - tr_len)
+			buf_info.code = MHI_EV_CC_OVERFLOW;
 
 		dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id);
-		ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info);
+		ret = mhi_cntrl->read_async(mhi_cntrl, &buf_info);
 		if (ret < 0) {
 			dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n");
-			return ret;
+			goto err_free_buf_addr;
 		}
 
 		buf_left -= tr_len;
 		mhi_chan->tre_bytes_left -= tr_len;
 
-		/*
-		 * Once the TRE (Transfer Ring Element) of a TD (Transfer Descriptor) has been
-		 * read completely:
-		 *
-		 * 1. Send completion event to the host based on the flags set in TRE.
-		 * 2. Increment the local read offset of the transfer ring.
-		 */
 		if (!mhi_chan->tre_bytes_left) {
-			/*
-			 * The host will split the data packet into multiple TREs if it can't fit
-			 * the packet in a single TRE. In that case, CHAIN flag will be set by the
-			 * host for all TREs except the last one.
-			 */
-			if (MHI_TRE_DATA_GET_CHAIN(el)) {
-				/*
-				 * IEOB (Interrupt on End of Block) flag will be set by the host if
-				 * it expects the completion event for all TREs of a TD.
-				 */
-				if (MHI_TRE_DATA_GET_IEOB(el)) {
-					ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
-								     MHI_TRE_DATA_GET_LEN(el),
-								     MHI_EV_CC_EOB);
-					if (ret < 0) {
-						dev_err(&mhi_chan->mhi_dev->dev,
-							"Error sending transfer compl. event\n");
-						return ret;
-					}
-				}
-			} else {
-				/*
-				 * IEOT (Interrupt on End of Transfer) flag will be set by the host
-				 * for the last TRE of the TD and expects the completion event for
-				 * the same.
-				 */
-				if (MHI_TRE_DATA_GET_IEOT(el)) {
-					ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
-								     MHI_TRE_DATA_GET_LEN(el),
-								     MHI_EV_CC_EOT);
-					if (ret < 0) {
-						dev_err(&mhi_chan->mhi_dev->dev,
-							"Error sending transfer compl. event\n");
-						return ret;
-					}
-				}
-
-				if (MHI_TRE_DATA_GET_IEOT(el))
-					tr_done = true;
-			}
+			if (MHI_TRE_DATA_GET_IEOT(el))
+				tr_done = true;
 
-			mhi_ep_ring_inc_index(ring);
+			mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
 		}
-
-		result->bytes_xferd += tr_len;
 	} while (buf_left && !tr_done);
 
 	return 0;
+
+err_free_buf_addr:
+	kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_addr);
+
+	return ret;
 }
 
-static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
+static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring)
 {
 	struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
 	struct mhi_result result = {};
-	u32 len = MHI_EP_DEFAULT_MTU;
 	struct mhi_ep_chan *mhi_chan;
 	int ret;
 
@@ -452,32 +486,49 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring)
 		mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
 	} else {
 		/* UL channel */
-		result.buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA);
-		if (!result.buf_addr)
-			return -ENOMEM;
-
 		do {
-			ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len);
+			ret = mhi_ep_read_channel(mhi_cntrl, ring);
 			if (ret < 0) {
 				dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
-				kmem_cache_free(mhi_cntrl->tre_buf_cache, result.buf_addr);
 				return ret;
 			}
 
-			result.dir = mhi_chan->dir;
-			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
-			result.bytes_xferd = 0;
-			memset(result.buf_addr, 0, len);
-
 			/* Read until the ring becomes empty */
 		} while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
-
-		kmem_cache_free(mhi_cntrl->tre_buf_cache, result.buf_addr);
 	}
 
 	return 0;
 }
 
+static void mhi_ep_skb_completion(struct mhi_ep_buf_info *buf_info)
+{
+	struct mhi_ep_device *mhi_dev = buf_info->mhi_dev;
+	struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan;
+	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
+	struct mhi_ring_element *el = &ring->ring_cache[ring->rd_offset];
+	struct device *dev = &mhi_dev->dev;
+	struct mhi_result result = {};
+	int ret;
+
+	if (mhi_chan->xfer_cb) {
+		result.buf_addr = buf_info->cb_buf;
+		result.dir = mhi_chan->dir;
+		result.bytes_xferd = buf_info->size;
+
+		mhi_chan->xfer_cb(mhi_dev, &result);
+	}
+
+	ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, buf_info->size,
+					   buf_info->code);
+	if (ret) {
+		dev_err(dev, "Error sending transfer completion event\n");
+		return;
+	}
+
+	mhi_ep_ring_inc_index(ring);
+}
+
 /* TODO: Handle partially formed TDs */
 int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
 {

@@ -488,7 +539,6 @@ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
 	struct mhi_ring_element *el;
 	u32 buf_left, read_offset;
 	struct mhi_ep_ring *ring;
-	enum mhi_ev_ccs code;
 	size_t tr_len;
 	u32 tre_len;
 	int ret;

@@ -512,7 +562,7 @@ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
 			goto err_exit;
 		}
 
-		el = &ring->ring_cache[ring->rd_offset];
+		el = &ring->ring_cache[mhi_chan->rd_offset];
 		tre_len = MHI_TRE_DATA_GET_LEN(el);
 
 		tr_len = min(buf_left, tre_len);

@@ -521,33 +571,35 @@ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
 		buf_info.dev_addr = skb->data + read_offset;
 		buf_info.host_addr = MHI_TRE_DATA_GET_PTR(el);
 		buf_info.size = tr_len;
+		buf_info.cb = mhi_ep_skb_completion;
+		buf_info.cb_buf = skb;
+		buf_info.mhi_dev = mhi_dev;
 
-		dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
-		ret = mhi_cntrl->write_to_host(mhi_cntrl, &buf_info);
-		if (ret < 0) {
-			dev_err(dev, "Error writing to the channel\n");
-			goto err_exit;
-		}
-
-		buf_left -= tr_len;
 		/*
 		 * For all TREs queued by the host for DL channel, only the EOT flag will be set.
 		 * If the packet doesn't fit into a single TRE, send the OVERFLOW event to
 		 * the host so that the host can adjust the packet boundary to next TREs. Else send
 		 * the EOT event to the host indicating the packet boundary.
 		 */
-		if (buf_left)
-			code = MHI_EV_CC_OVERFLOW;
+		if (buf_left - tr_len)
+			buf_info.code = MHI_EV_CC_OVERFLOW;
 		else
-			code = MHI_EV_CC_EOT;
+			buf_info.code = MHI_EV_CC_EOT;
 
-		ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, tr_len, code);
-		if (ret) {
-			dev_err(dev, "Error sending transfer completion event\n");
+		dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
+		ret = mhi_cntrl->write_async(mhi_cntrl, &buf_info);
+		if (ret < 0) {
+			dev_err(dev, "Error writing to the channel\n");
 			goto err_exit;
 		}
 
-		mhi_ep_ring_inc_index(ring);
+		buf_left -= tr_len;
+
+		/*
+		 * Update the read offset cached in mhi_chan. Actual read offset
+		 * will be updated by the completion handler.
+		 */
+		mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
 	} while (buf_left);
 
 	mutex_unlock(&mhi_chan->lock);
@@ -748,7 +800,6 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work)
 	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work);
 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
 	struct mhi_ep_ring_item *itr, *tmp;
-	struct mhi_ring_element *el;
 	struct mhi_ep_ring *ring;
 	struct mhi_ep_chan *chan;
 	unsigned long flags;

@@ -787,16 +838,14 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work)
 		}
 
 		/* Sanity check to make sure there are elements in the ring */
-		if (ring->rd_offset == ring->wr_offset) {
+		if (chan->rd_offset == ring->wr_offset) {
 			mutex_unlock(&chan->lock);
 			kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
 			continue;
 		}
 
-		el = &ring->ring_cache[ring->rd_offset];
-
 		dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id);
-		ret = mhi_ep_process_ch_ring(ring, el);
+		ret = mhi_ep_process_ch_ring(ring);
 		if (ret) {
 			dev_err(dev, "Error processing ring for channel (%u): %d\n",
 				ring->ch_id, ret);

@@ -1411,14 +1460,14 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
 
 	mhi_cntrl->ev_ring_el_cache = kmem_cache_create("mhi_ep_event_ring_el",
 							sizeof(struct mhi_ring_element), 0,
-							SLAB_CACHE_DMA, NULL);
+							0, NULL);
 	if (!mhi_cntrl->ev_ring_el_cache) {
 		ret = -ENOMEM;
 		goto err_free_cmd;
 	}
 
 	mhi_cntrl->tre_buf_cache = kmem_cache_create("mhi_ep_tre_buf", MHI_EP_DEFAULT_MTU, 0,
-						     SLAB_CACHE_DMA, NULL);
+						     0, NULL);
 	if (!mhi_cntrl->tre_buf_cache) {
 		ret = -ENOMEM;
 		goto err_destroy_ev_ring_el_cache;

@@ -1431,6 +1480,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
 		ret = -ENOMEM;
 		goto err_destroy_tre_buf_cache;
 	}
+
 	INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker);
 	INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker);
 	INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker);
|
@ -48,7 +48,7 @@ static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
|
|||
buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element));
|
||||
buf_info.dev_addr = &ring->ring_cache[start];
|
||||
|
||||
ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info);
|
||||
ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
} else {
|
||||
|
@ -56,7 +56,7 @@ static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
|
|||
buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element));
|
||||
buf_info.dev_addr = &ring->ring_cache[start];
|
||||
|
||||
ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info);
|
||||
ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
|
@ -65,7 +65,7 @@ static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
|
|||
buf_info.dev_addr = &ring->ring_cache[0];
|
||||
buf_info.size = end * sizeof(struct mhi_ring_element);
|
||||
|
||||
ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info);
|
||||
ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
|
@ -143,7 +143,7 @@ int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *e
|
|||
buf_info.dev_addr = el;
|
||||
buf_info.size = sizeof(*el);
|
||||
|
||||
return mhi_cntrl->write_to_host(mhi_cntrl, &buf_info);
|
||||
return mhi_cntrl->write_sync(mhi_cntrl, &buf_info);
|
||||
}
|
||||
|
||||
void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id)
|
||||
|
|
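Taken together with the main.c changes above, host memory access is now split into two explicit flavors: ring-cache maintenance in this file uses the synchronous read_sync()/write_sync() callbacks, while the channel data path uses read_async()/write_async(), passing a completion callback through the cb, cb_buf and mhi_dev fields added to struct mhi_ep_buf_info, so transfer-completion events are sent from the callback rather than inline.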
|
@ -2052,25 +2052,27 @@ static int virtcons_probe(struct virtio_device *vdev)
|
|||
multiport = true;
|
||||
}
|
||||
|
||||
err = init_vqs(portdev);
|
||||
if (err < 0) {
|
||||
dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
|
||||
goto free_chrdev;
|
||||
}
|
||||
|
||||
spin_lock_init(&portdev->ports_lock);
|
||||
INIT_LIST_HEAD(&portdev->ports);
|
||||
INIT_LIST_HEAD(&portdev->list);
|
||||
|
||||
virtio_device_ready(portdev->vdev);
|
||||
|
||||
INIT_WORK(&portdev->config_work, &config_work_handler);
|
||||
INIT_WORK(&portdev->control_work, &control_work_handler);
|
||||
|
||||
if (multiport) {
|
||||
spin_lock_init(&portdev->c_ivq_lock);
|
||||
spin_lock_init(&portdev->c_ovq_lock);
|
||||
}
|
||||
|
||||
err = init_vqs(portdev);
|
||||
if (err < 0) {
|
||||
dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
|
||||
goto free_chrdev;
|
||||
}
|
||||
|
||||
virtio_device_ready(portdev->vdev);
|
||||
|
||||
if (multiport) {
|
||||
err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
|
||||
if (err < 0) {
|
||||
dev_err(&vdev->dev,
|
||||
|
|
|
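The reordering matters because init_vqs() can allow the device to raise virtqueue interrupts immediately: the spinlocks, lists and work items those callbacks touch must already be initialized, and virtio_device_ready() is likewise only called once everything the handlers depend on is set up.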
@@ -112,7 +112,7 @@ static void bcm53573_ilp_init(struct device_node *np)
 		goto err_free_ilp;
 	}
 
-	ilp->regmap = syscon_node_to_regmap(of_get_parent(np));
+	ilp->regmap = syscon_node_to_regmap(np->parent);
 	if (IS_ERR(ilp->regmap)) {
 		err = PTR_ERR(ilp->regmap);
 		goto err_free_ilp;
|
@ -498,9 +498,9 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
|
|||
hws[IMX7D_ENET_AXI_ROOT_SRC] = imx_clk_hw_mux2_flags("enet_axi_src", base + 0x8900, 24, 3, enet_axi_sel, ARRAY_SIZE(enet_axi_sel), CLK_SET_PARENT_GATE);
|
||||
hws[IMX7D_NAND_USDHC_BUS_ROOT_SRC] = imx_clk_hw_mux2_flags("nand_usdhc_src", base + 0x8980, 24, 3, nand_usdhc_bus_sel, ARRAY_SIZE(nand_usdhc_bus_sel), CLK_SET_PARENT_GATE);
|
||||
hws[IMX7D_DRAM_PHYM_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_phym_src", base + 0x9800, 24, 1, dram_phym_sel, ARRAY_SIZE(dram_phym_sel), CLK_SET_PARENT_GATE);
|
||||
hws[IMX7D_DRAM_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_src", base + 0x9880, 24, 1, dram_sel, ARRAY_SIZE(dram_sel), CLK_SET_PARENT_GATE);
|
||||
hws[IMX7D_DRAM_ROOT_SRC] = imx_clk_hw_mux2("dram_src", base + 0x9880, 24, 1, dram_sel, ARRAY_SIZE(dram_sel));
|
||||
hws[IMX7D_DRAM_PHYM_ALT_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_phym_alt_src", base + 0xa000, 24, 3, dram_phym_alt_sel, ARRAY_SIZE(dram_phym_alt_sel), CLK_SET_PARENT_GATE);
|
||||
hws[IMX7D_DRAM_ALT_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_alt_src", base + 0xa080, 24, 3, dram_alt_sel, ARRAY_SIZE(dram_alt_sel), CLK_SET_PARENT_GATE);
|
||||
hws[IMX7D_DRAM_ALT_ROOT_SRC] = imx_clk_hw_mux2("dram_alt_src", base + 0xa080, 24, 3, dram_alt_sel, ARRAY_SIZE(dram_alt_sel));
|
||||
hws[IMX7D_USB_HSIC_ROOT_SRC] = imx_clk_hw_mux2_flags("usb_hsic_src", base + 0xa100, 24, 3, usb_hsic_sel, ARRAY_SIZE(usb_hsic_sel), CLK_SET_PARENT_GATE);
|
||||
hws[IMX7D_PCIE_CTRL_ROOT_SRC] = imx_clk_hw_mux2_flags("pcie_ctrl_src", base + 0xa180, 24, 3, pcie_ctrl_sel, ARRAY_SIZE(pcie_ctrl_sel), CLK_SET_PARENT_GATE);
|
||||
hws[IMX7D_PCIE_PHY_ROOT_SRC] = imx_clk_hw_mux2_flags("pcie_phy_src", base + 0xa200, 24, 3, pcie_phy_sel, ARRAY_SIZE(pcie_phy_sel), CLK_SET_PARENT_GATE);
|
||||
|
|
|
@@ -140,6 +140,11 @@ int main(void)
 {
 	FILE *fp = fopen("ni_values.py", "w");
 
+	if (fp == NULL) {
+		fprintf(stderr, "Could not open file!");
+		return -1;
+	}
+
 	/* write route register values */
 	fprintf(fp, "ni_route_values = {\n");
 	for (int i = 0; ni_all_route_values[i]; ++i)
@@ -86,7 +86,7 @@ static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn,
 		nr_pages = 1;
 
 	pgoff = linear_page_index(vmf->vma,
-			ALIGN(vmf->address, fault_size));
+			ALIGN_DOWN(vmf->address, fault_size));
 
 	for (i = 0; i < nr_pages; i++) {
 		struct page *page = pfn_to_page(pfn_t_to_pfn(pfn) + i);
@@ -406,6 +406,8 @@ static void __aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset,
 	gpio->dcache[GPIO_BANK(offset)] = reg;
 
 	iowrite32(reg, addr);
+	/* Flush write */
+	ioread32(addr);
 }
 
 static void aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset,

@@ -1191,7 +1193,7 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
 	if (!gpio_id)
 		return -EINVAL;
 
-	gpio->clk = of_clk_get(pdev->dev.of_node, 0);
+	gpio->clk = devm_clk_get_enabled(&pdev->dev, NULL);
 	if (IS_ERR(gpio->clk)) {
 		dev_warn(&pdev->dev,
 			 "Failed to get clock from devicetree, debouncing disabled\n");
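Two independent fixes here: reading the register back forces the posted MMIO write out to the device before __aspeed_gpio_set() returns, and devm_clk_get_enabled() acquires and enables the clock with managed cleanup, so the reference taken by the old of_clk_get() call can no longer be leaked. The read-back idiom in a generic, illustrative form (the helper name is hypothetical, not from this patch):

    /* Illustrative sketch only: flush a posted MMIO write. */
    static inline void write_then_flush(void __iomem *addr, u32 val)
    {
    	iowrite32(val, addr);
    	(void)ioread32(addr);	/* a read from the same device flushes the posted write */
    }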
@@ -1691,7 +1691,7 @@ bool dc_validate_boot_timing(const struct dc *dc,
 	if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
 		return false;
 
-	if (!se->funcs->dp_get_pixel_format)
+	if (!se || !se->funcs->dp_get_pixel_format)
 		return false;
 
 	if (!se->funcs->dp_get_pixel_format(
@@ -432,21 +432,18 @@ static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp,
 		goto out;
 	}
 
-	if (status == MOD_HDCP_STATUS_SUCCESS)
-		if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
-				&input->bstatus_read, &status,
-				hdcp, "bstatus_read"))
-			goto out;
-	if (status == MOD_HDCP_STATUS_SUCCESS)
-		if (!mod_hdcp_execute_and_set(check_link_integrity_dp,
-				&input->link_integrity_check, &status,
-				hdcp, "link_integrity_check"))
-			goto out;
-	if (status == MOD_HDCP_STATUS_SUCCESS)
-		if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
-				&input->reauth_request_check, &status,
-				hdcp, "reauth_request_check"))
-			goto out;
+	mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
+			&input->bstatus_read, &status,
+			hdcp, "bstatus_read");
+
+	mod_hdcp_execute_and_set(check_link_integrity_dp,
+			&input->link_integrity_check, &status,
+			hdcp, "link_integrity_check");
+
+	mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
+			&input->reauth_request_check, &status,
+			hdcp, "reauth_request_check");
+
 out:
 	return status;
 }
@@ -904,6 +904,7 @@ out:
 	connector_set = NULL;
 	fb = NULL;
 	mode = NULL;
+	num_connectors = 0;
 
 	DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
 
@@ -1005,7 +1005,8 @@ static void intel_hdcp_update_value(struct intel_connector *connector,
 	hdcp->value = value;
 	if (update_property) {
 		drm_connector_get(&connector->base);
-		queue_work(i915->unordered_wq, &hdcp->prop_work);
+		if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
+			drm_connector_put(&connector->base);
 	}
 }
 

@@ -2480,7 +2481,8 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
 		mutex_lock(&hdcp->mutex);
 		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
 		drm_connector_get(&connector->base);
-		queue_work(i915->unordered_wq, &hdcp->prop_work);
+		if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
+			drm_connector_put(&connector->base);
 		mutex_unlock(&hdcp->mutex);
 	}
 

@@ -2497,7 +2499,9 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
 	 */
 	if (!desired_and_not_enabled && !content_protection_type_changed) {
 		drm_connector_get(&connector->base);
-		queue_work(i915->unordered_wq, &hdcp->prop_work);
+		if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
+			drm_connector_put(&connector->base);
+
 	}
 }
 
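All three call sites follow one pattern: take a connector reference on behalf of the queued worker, and if queue_work() returns false because prop_work was already pending, drop the just-taken reference immediately — otherwise that extra reference would never be released and the connector would leak.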
|
@ -1157,7 +1157,7 @@ nv04_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
|
|||
chan = drm->channel;
|
||||
if (!chan)
|
||||
return -ENODEV;
|
||||
cli = (void *)chan->user.client;
|
||||
cli = chan->cli;
|
||||
push = chan->chan.push;
|
||||
|
||||
s = kzalloc(sizeof(*s), GFP_KERNEL);
|
||||
|
|
|
@ -351,7 +351,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
|
|||
list_add(&chan->head, &abi16->channels);
|
||||
|
||||
/* create channel object and initialise dma and fence management */
|
||||
ret = nouveau_channel_new(drm, device, false, runm, init->fb_ctxdma_handle,
|
||||
ret = nouveau_channel_new(cli, false, runm, init->fb_ctxdma_handle,
|
||||
init->tt_ctxdma_handle, &chan->chan);
|
||||
if (ret)
|
||||
goto done;
|
||||
|
|
|
@@ -843,7 +843,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_channel *chan = drm->ttm.chan;
-	struct nouveau_cli *cli = (void *)chan->user.client;
+	struct nouveau_cli *cli = chan->cli;
 	struct nouveau_fence *fence;
 	int ret;
 
@@ -52,7 +52,7 @@ static int
 nouveau_channel_killed(struct nvif_event *event, void *repv, u32 repc)
 {
 	struct nouveau_channel *chan = container_of(event, typeof(*chan), kill);
-	struct nouveau_cli *cli = (void *)chan->user.client;
+	struct nouveau_cli *cli = chan->cli;
 
 	NV_PRINTK(warn, cli, "channel %d killed!\n", chan->chid);
 

@@ -66,7 +66,7 @@ int
 nouveau_channel_idle(struct nouveau_channel *chan)
 {
 	if (likely(chan && chan->fence && !atomic_read(&chan->killed))) {
-		struct nouveau_cli *cli = (void *)chan->user.client;
+		struct nouveau_cli *cli = chan->cli;
 		struct nouveau_fence *fence = NULL;
 		int ret;
 

@@ -142,10 +142,11 @@ nouveau_channel_wait(struct nvif_push *push, u32 size)
 }
 
 static int
-nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
+nouveau_channel_prep(struct nouveau_cli *cli,
 		     u32 size, struct nouveau_channel **pchan)
 {
-	struct nouveau_cli *cli = (void *)device->object.client;
+	struct nouveau_drm *drm = cli->drm;
+	struct nvif_device *device = &cli->device;
 	struct nv_dma_v0 args = {};
 	struct nouveau_channel *chan;
 	u32 target;

@@ -155,6 +156,7 @@ nouveau_channel_prep(struct nouveau_cli *cli,
 	if (!chan)
 		return -ENOMEM;
 
+	chan->cli = cli;
 	chan->device = device;
 	chan->drm = drm;
 	chan->vmm = nouveau_cli_vmm(cli);

@@ -254,7 +256,7 @@ nouveau_channel_prep(struct nouveau_cli *cli,
 }
 
 static int
-nouveau_channel_ctor(struct nouveau_drm *drm, struct nvif_device *device, bool priv, u64 runm,
+nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm,
 		     struct nouveau_channel **pchan)
 {
 	const struct nvif_mclass hosts[] = {

@@ -279,7 +281,7 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm,
 		struct nvif_chan_v0 chan;
 		char name[TASK_COMM_LEN+16];
 	} args;
-	struct nouveau_cli *cli = (void *)device->object.client;
+	struct nvif_device *device = &cli->device;
 	struct nouveau_channel *chan;
 	const u64 plength = 0x10000;
 	const u64 ioffset = plength;

@@ -298,7 +300,7 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm,
 	size = ioffset + ilength;
 
 	/* allocate dma push buffer */
-	ret = nouveau_channel_prep(drm, device, size, &chan);
+	ret = nouveau_channel_prep(cli, size, &chan);
 	*pchan = chan;
 	if (ret)
 		return ret;

@@ -493,13 +495,12 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 }
 
 int
-nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
+nouveau_channel_new(struct nouveau_cli *cli,
 		    bool priv, u64 runm, u32 vram, u32 gart, struct nouveau_channel **pchan)
 {
-	struct nouveau_cli *cli = (void *)device->object.client;
 	int ret;
 
-	ret = nouveau_channel_ctor(drm, device, priv, runm, pchan);
+	ret = nouveau_channel_ctor(cli, priv, runm, pchan);
 	if (ret) {
 		NV_PRINTK(dbg, cli, "channel create, %d\n", ret);
 		return ret;
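The common thread in these hunks: struct nouveau_channel gains a cli back-pointer, set once in nouveau_channel_prep(), so every helper that previously recovered the client through the `(void *)chan->user.client` cast — or had to be handed a (drm, device) pair — now takes or reads the nouveau_cli directly.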
@@ -12,6 +12,7 @@ struct nouveau_channel {
 		struct nvif_push *push;
 	} chan;
 
+	struct nouveau_cli *cli;
 	struct nvif_device *device;
 	struct nouveau_drm *drm;
 	struct nouveau_vmm *vmm;

@@ -62,7 +63,7 @@ struct nouveau_channel {
 int nouveau_channels_init(struct nouveau_drm *);
 void nouveau_channels_fini(struct nouveau_drm *);
 
-int  nouveau_channel_new(struct nouveau_drm *, struct nvif_device *, bool priv, u64 runm,
+int  nouveau_channel_new(struct nouveau_cli *, bool priv, u64 runm,
 			 u32 vram, u32 gart, struct nouveau_channel **);
 void nouveau_channel_del(struct nouveau_channel **);
 int  nouveau_channel_idle(struct nouveau_channel *);
@@ -193,7 +193,7 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
 	if (!spage || !(src & MIGRATE_PFN_MIGRATE))
 		goto done;
 
-	dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
+	dpage = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vmf->vma, vmf->address);
 	if (!dpage)
 		goto done;
 
|
@ -343,7 +343,7 @@ nouveau_accel_ce_init(struct nouveau_drm *drm)
|
|||
return;
|
||||
}
|
||||
|
||||
ret = nouveau_channel_new(drm, device, false, runm, NvDmaFB, NvDmaTT, &drm->cechan);
|
||||
ret = nouveau_channel_new(&drm->client, true, runm, NvDmaFB, NvDmaTT, &drm->cechan);
|
||||
if (ret)
|
||||
NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
|
||||
}
|
||||
|
@ -371,7 +371,7 @@ nouveau_accel_gr_init(struct nouveau_drm *drm)
|
|||
return;
|
||||
}
|
||||
|
||||
ret = nouveau_channel_new(drm, device, false, runm, NvDmaFB, NvDmaTT, &drm->channel);
|
||||
ret = nouveau_channel_new(&drm->client, false, runm, NvDmaFB, NvDmaTT, &drm->channel);
|
||||
if (ret) {
|
||||
NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
|
||||
nouveau_accel_gr_fini(drm);
|
||||
|
|
|
@@ -2112,11 +2112,11 @@ static const struct panel_desc starry_qfh032011_53g_desc = {
 };
 
 static const struct drm_display_mode starry_himax83102_j02_default_mode = {
-	.clock = 162850,
+	.clock = 162680,
 	.hdisplay = 1200,
-	.hsync_start = 1200 + 50,
-	.hsync_end = 1200 + 50 + 20,
-	.htotal = 1200 + 50 + 20 + 50,
+	.hsync_start = 1200 + 60,
+	.hsync_end = 1200 + 60 + 20,
+	.htotal = 1200 + 60 + 20 + 40,
 	.vdisplay = 1920,
 	.vsync_start = 1920 + 116,
 	.vsync_end = 1920 + 116 + 8,
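Worked through, the horizontal total is unchanged: 1200 + 50 + 20 + 50 and 1200 + 60 + 20 + 40 both give htotal = 1320. The change redistributes the front and back porch around the 20-pixel sync pulse and trims the pixel clock from 162.850 MHz to 162.680 MHz (the .clock field is in kHz), presumably to match the panel's rated timing.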
|
@ -103,6 +103,11 @@ void v3d_perfmon_open_file(struct v3d_file_priv *v3d_priv)
|
|||
static int v3d_perfmon_idr_del(int id, void *elem, void *data)
|
||||
{
|
||||
struct v3d_perfmon *perfmon = elem;
|
||||
struct v3d_dev *v3d = (struct v3d_dev *)data;
|
||||
|
||||
/* If the active perfmon is being destroyed, stop it first */
|
||||
if (perfmon == v3d->active_perfmon)
|
||||
v3d_perfmon_stop(v3d, perfmon, false);
|
||||
|
||||
v3d_perfmon_put(perfmon);
|
||||
|
||||
|
@ -111,8 +116,10 @@ static int v3d_perfmon_idr_del(int id, void *elem, void *data)
|
|||
|
||||
void v3d_perfmon_close_file(struct v3d_file_priv *v3d_priv)
|
||||
{
|
||||
struct v3d_dev *v3d = v3d_priv->v3d;
|
||||
|
||||
mutex_lock(&v3d_priv->perfmon.lock);
|
||||
idr_for_each(&v3d_priv->perfmon.idr, v3d_perfmon_idr_del, NULL);
|
||||
idr_for_each(&v3d_priv->perfmon.idr, v3d_perfmon_idr_del, v3d);
|
||||
idr_destroy(&v3d_priv->perfmon.idr);
|
||||
mutex_unlock(&v3d_priv->perfmon.lock);
|
||||
mutex_destroy(&v3d_priv->perfmon.lock);
|
||||
|
|
|
@ -116,6 +116,11 @@ void vc4_perfmon_open_file(struct vc4_file *vc4file)
|
|||
static int vc4_perfmon_idr_del(int id, void *elem, void *data)
|
||||
{
|
||||
struct vc4_perfmon *perfmon = elem;
|
||||
struct vc4_dev *vc4 = (struct vc4_dev *)data;
|
||||
|
||||
/* If the active perfmon is being destroyed, stop it first */
|
||||
if (perfmon == vc4->active_perfmon)
|
||||
vc4_perfmon_stop(vc4, perfmon, false);
|
||||
|
||||
vc4_perfmon_put(perfmon);
|
||||
|
||||
|
@ -130,7 +135,7 @@ void vc4_perfmon_close_file(struct vc4_file *vc4file)
|
|||
return;
|
||||
|
||||
mutex_lock(&vc4file->perfmon.lock);
|
||||
idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, NULL);
|
||||
idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, vc4);
|
||||
idr_destroy(&vc4file->perfmon.idr);
|
||||
mutex_unlock(&vc4file->perfmon.lock);
|
||||
mutex_destroy(&vc4file->perfmon.lock);
|
||||
|
|
|
@ -1300,6 +1300,15 @@ config HID_ALPS
|
|||
Say Y here if you have a Alps touchpads over i2c-hid or usbhid
|
||||
and want support for its special functionalities.
|
||||
|
||||
config HID_MCP2200
|
||||
tristate "Microchip MCP2200 HID USB-to-GPIO bridge"
|
||||
depends on USB_HID && GPIOLIB
|
||||
help
|
||||
Provides GPIO functionality over USB-HID through MCP2200 device.
|
||||
|
||||
To compile this driver as a module, choose M here: the module
|
||||
will be called hid-mcp2200.ko.
|
||||
|
||||
config HID_MCP2221
|
||||
tristate "Microchip MCP2221 HID USB-to-I2C/SMbus host support"
|
||||
depends on USB_HID && I2C
|
||||
|
|
|
@ -79,6 +79,7 @@ obj-$(CONFIG_HID_LOGITECH_HIDPP) += hid-logitech-hidpp.o
|
|||
obj-$(CONFIG_HID_MACALLY) += hid-macally.o
|
||||
obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o
|
||||
obj-$(CONFIG_HID_MALTRON) += hid-maltron.o
|
||||
obj-$(CONFIG_HID_MCP2200) += hid-mcp2200.o
|
||||
obj-$(CONFIG_HID_MCP2221) += hid-mcp2221.o
|
||||
obj-$(CONFIG_HID_MAYFLASH) += hid-mf.o
|
||||
obj-$(CONFIG_HID_MEGAWORLD_FF) += hid-megaworld.o
|
||||
|
|
|
@@ -236,9 +236,9 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
	cl_data->in_data = in_data;

	for (i = 0; i < cl_data->num_hid_devices; i++) {
-		in_data->sensor_virt_addr[i] = dma_alloc_coherent(dev, sizeof(int) * 8,
-								  &cl_data->sensor_dma_addr[i],
-								  GFP_KERNEL);
+		in_data->sensor_virt_addr[i] = dmam_alloc_coherent(dev, sizeof(int) * 8,
+								   &cl_data->sensor_dma_addr[i],
+								   GFP_KERNEL);
		if (!in_data->sensor_virt_addr[i]) {
			rc = -ENOMEM;
			goto cleanup;

@@ -331,7 +331,6 @@ cleanup:
int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata)
{
	struct amdtp_cl_data *cl_data = privdata->cl_data;
-	struct amd_input_data *in_data = cl_data->in_data;
	int i, status;

	for (i = 0; i < cl_data->num_hid_devices; i++) {

@@ -351,12 +350,5 @@ int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata)
	cancel_delayed_work_sync(&cl_data->work_buffer);
	amdtp_hid_remove(cl_data);

-	for (i = 0; i < cl_data->num_hid_devices; i++) {
-		if (in_data->sensor_virt_addr[i]) {
-			dma_free_coherent(&privdata->pdev->dev, 8 * sizeof(int),
-					  in_data->sensor_virt_addr[i],
-					  cl_data->sensor_dma_addr[i]);
-		}
-	}
	return 0;
}
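The amd-sfh change swaps dma_alloc_coherent() for its device-managed variant, which ties the buffer lifetime to the struct device: devres frees it automatically when the driver detaches, which is why the explicit dma_free_coherent() loop in the deinit path could be deleted. A hedged sketch of the pattern (function and parameter names hypothetical):

	#include <linux/dma-mapping.h>

	/* Managed DMA memory: released by devres on unbind, no manual free. */
	static int alloc_sensor_buf(struct device *dev, void **cpu, dma_addr_t *dma)
	{
		*cpu = dmam_alloc_coherent(dev, 8 * sizeof(int), dma, GFP_KERNEL);
		return *cpu ? 0 : -ENOMEM;
	}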
@@ -881,7 +881,10 @@ static int asus_input_mapping(struct hid_device *hdev,
		case 0xb3: asus_map_key_clear(KEY_PROG3);	break; /* Fn+Left next aura */
		case 0x6a: asus_map_key_clear(KEY_F13);		break; /* Screenpad toggle */
		case 0x4b: asus_map_key_clear(KEY_F14);		break; /* Arrows/Pg-Up/Dn toggle */
+		case 0xa5: asus_map_key_clear(KEY_F15);		break; /* ROG Ally left back */
+		case 0xa6: asus_map_key_clear(KEY_F16);		break; /* ROG Ally QAM button */
+		case 0xa7: asus_map_key_clear(KEY_F17);		break; /* ROG Ally ROG long-press */
+		case 0xa8: asus_map_key_clear(KEY_F18);		break; /* ROG Ally ROG long-press-release */

		default:
			/* ASUS lazily declares 256 usages, ignore the rest,

@@ -1273,6 +1276,15 @@ static const struct hid_device_id asus_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
	    USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD3),
	  QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+	    USB_DEVICE_ID_ASUSTEK_ROG_Z13_LIGHTBAR),
+	  QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+	    USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY),
+	  QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+	    USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY_X),
+	  QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
	    USB_DEVICE_ID_ASUSTEK_ROG_CLAYMORE_II_KEYBOARD),
	  QUIRK_ROG_CLAYMORE_II_KEYBOARD },
@@ -208,6 +208,9 @@
#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD	0x1866
#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2	0x19b6
#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD3	0x1a30
+#define USB_DEVICE_ID_ASUSTEK_ROG_Z13_LIGHTBAR		0x18c6
+#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY		0x1abe
+#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY_X		0x1b4c
#define USB_DEVICE_ID_ASUSTEK_ROG_CLAYMORE_II_KEYBOARD	0x196b
#define USB_DEVICE_ID_ASUSTEK_FX503VD_KEYBOARD	0x1869

@@ -298,6 +301,9 @@

#define USB_VENDOR_ID_CIDC		0x1677

+#define I2C_VENDOR_ID_CIRQUE		0x0488
+#define I2C_PRODUCT_ID_CIRQUE_1063	0x1063
+
#define USB_VENDOR_ID_CJTOUCH		0x24b8
#define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0020	0x0020
#define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0040	0x0040

@@ -499,6 +505,7 @@
#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100

#define I2C_VENDOR_ID_GOODIX		0x27c6
+#define I2C_DEVICE_ID_GOODIX_01E0	0x01e0
#define I2C_DEVICE_ID_GOODIX_01E8	0x01e8
#define I2C_DEVICE_ID_GOODIX_01E9	0x01e9
#define I2C_DEVICE_ID_GOODIX_01F0	0x01f0

@@ -907,6 +914,7 @@
#define USB_DEVICE_ID_PICK16F1454	0x0042
#define USB_DEVICE_ID_PICK16F1454_V2	0xf2f7
#define USB_DEVICE_ID_LUXAFOR		0xf372
+#define USB_DEVICE_ID_MCP2200		0x00df
#define USB_DEVICE_ID_MCP2221		0x00dd

#define USB_VENDOR_ID_MICROSOFT		0x045e

@@ -1025,6 +1033,8 @@
#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES	0xc056
#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3215_SERIES	0xc057
#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3225_SERIES	0xc058
+#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3325_SERIES	0x430c
+#define USB_DEVICE_ID_PLANTRONICS_ENCOREPRO_500_SERIES	0x431e

#define USB_VENDOR_ID_PANASONIC		0x04da
#define USB_DEVICE_ID_PANABOARD_UBT780	0x1044
@@ -0,0 +1,392 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * MCP2200 - Microchip USB to GPIO bridge
 *
 * Copyright (c) 2023, Johannes Roith <johannes@gnu-linux.rocks>
 *
 * Datasheet: https://ww1.microchip.com/downloads/en/DeviceDoc/22228A.pdf
 * App Note for HID: https://ww1.microchip.com/downloads/en/DeviceDoc/93066A.pdf
 */
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/hid.h>
#include <linux/hidraw.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include "hid-ids.h"

/* Command codes in a raw output report */
#define SET_CLEAR_OUTPUTS	0x08
#define CONFIGURE		0x10
#define READ_EE			0x20
#define WRITE_EE		0x40
#define READ_ALL		0x80

/* MCP GPIO direction encoding */
enum MCP_IO_DIR {
	MCP2200_DIR_OUT = 0x00,
	MCP2200_DIR_IN = 0x01,
};

/* Alternative pin assignments */
#define TXLED		2
#define RXLED		3
#define USBCFG		6
#define SSPND		7
#define MCP_NGPIO	8

/* CMD to set or clear a GPIO output */
struct mcp_set_clear_outputs {
	u8 cmd;
	u8 dummys1[10];
	u8 set_bmap;
	u8 clear_bmap;
	u8 dummys2[3];
} __packed;

/* CMD to configure the IOs */
struct mcp_configure {
	u8 cmd;
	u8 dummys1[3];
	u8 io_bmap;
	u8 config_alt_pins;
	u8 io_default_val_bmap;
	u8 config_alt_options;
	u8 baud_h;
	u8 baud_l;
	u8 dummys2[6];
} __packed;

/* CMD to read all parameters */
struct mcp_read_all {
	u8 cmd;
	u8 dummys[15];
} __packed;

/* Response to the read all cmd */
struct mcp_read_all_resp {
	u8 cmd;
	u8 eep_addr;
	u8 dummy;
	u8 eep_val;
	u8 io_bmap;
	u8 config_alt_pins;
	u8 io_default_val_bmap;
	u8 config_alt_options;
	u8 baud_h;
	u8 baud_l;
	u8 io_port_val_bmap;
	u8 dummys[5];
} __packed;

struct mcp2200 {
	struct hid_device *hdev;
	struct mutex lock;
	struct completion wait_in_report;
	u8 gpio_dir;
	u8 gpio_val;
	u8 gpio_inval;
	u8 baud_h;
	u8 baud_l;
	u8 config_alt_pins;
	u8 gpio_reset_val;
	u8 config_alt_options;
	int status;
	struct gpio_chip gc;
	u8 hid_report[16];
};

/* this executes the READ_ALL cmd */
static int mcp_cmd_read_all(struct mcp2200 *mcp)
{
	struct mcp_read_all *read_all;
	int len, t;

	reinit_completion(&mcp->wait_in_report);

	mutex_lock(&mcp->lock);

	read_all = (struct mcp_read_all *) mcp->hid_report;
	read_all->cmd = READ_ALL;
	len = hid_hw_output_report(mcp->hdev, (u8 *) read_all,
				   sizeof(struct mcp_read_all));

	mutex_unlock(&mcp->lock);

	if (len != sizeof(struct mcp_read_all))
		return -EINVAL;

	t = wait_for_completion_timeout(&mcp->wait_in_report,
					msecs_to_jiffies(4000));
	if (!t)
		return -ETIMEDOUT;

	/* return status, negative value if wrong response was received */
	return mcp->status;
}

static void mcp_set_multiple(struct gpio_chip *gc, unsigned long *mask,
			     unsigned long *bits)
{
	struct mcp2200 *mcp = gpiochip_get_data(gc);
	u8 value;
	int status;
	struct mcp_set_clear_outputs *cmd;

	mutex_lock(&mcp->lock);
	cmd = (struct mcp_set_clear_outputs *) mcp->hid_report;

	value = mcp->gpio_val & ~*mask;
	value |= (*mask & *bits);

	cmd->cmd = SET_CLEAR_OUTPUTS;
	cmd->set_bmap = value;
	cmd->clear_bmap = ~(value);

	status = hid_hw_output_report(mcp->hdev, (u8 *) cmd,
				      sizeof(struct mcp_set_clear_outputs));

	if (status == sizeof(struct mcp_set_clear_outputs))
		mcp->gpio_val = value;

	mutex_unlock(&mcp->lock);
}

static void mcp_set(struct gpio_chip *gc, unsigned int gpio_nr, int value)
{
	unsigned long mask = 1 << gpio_nr;
	unsigned long bmap_value = value << gpio_nr;

	mcp_set_multiple(gc, &mask, &bmap_value);
}

static int mcp_get_multiple(struct gpio_chip *gc, unsigned long *mask,
			    unsigned long *bits)
{
	u32 val;
	struct mcp2200 *mcp = gpiochip_get_data(gc);
	int status;

	status = mcp_cmd_read_all(mcp);
	if (status)
		return status;

	val = mcp->gpio_inval;
	*bits = (val & *mask);
	return 0;
}

static int mcp_get(struct gpio_chip *gc, unsigned int gpio_nr)
{
	unsigned long mask = 0, bits = 0;

	mask = (1 << gpio_nr);
	mcp_get_multiple(gc, &mask, &bits);
	return bits > 0;
}

static int mcp_get_direction(struct gpio_chip *gc, unsigned int gpio_nr)
{
	struct mcp2200 *mcp = gpiochip_get_data(gc);

	return (mcp->gpio_dir & (MCP2200_DIR_IN << gpio_nr))
		? GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT;
}

static int mcp_set_direction(struct gpio_chip *gc, unsigned int gpio_nr,
			     enum MCP_IO_DIR io_direction)
{
	struct mcp2200 *mcp = gpiochip_get_data(gc);
	struct mcp_configure *conf;
	int status;
	/* after the configure cmd we will need to set the outputs again */
	unsigned long mask = ~(mcp->gpio_dir); /* only set outputs */
	unsigned long bits = mcp->gpio_val;
	/* Offsets of alternative pins in config_alt_pins, 0 is not used */
	u8 alt_pin_conf[8] = {SSPND, USBCFG, 0, 0, 0, 0, RXLED, TXLED};
	u8 config_alt_pins = mcp->config_alt_pins;

	/* Read in the reset baudrate first, we need it later */
	status = mcp_cmd_read_all(mcp);
	if (status != 0)
		return status;

	mutex_lock(&mcp->lock);
	conf = (struct mcp_configure *) mcp->hid_report;

	/* configure will reset the chip! */
	conf->cmd = CONFIGURE;
	conf->io_bmap = (mcp->gpio_dir & ~(1 << gpio_nr))
		| (io_direction << gpio_nr);
	/* Don't overwrite the reset parameters */
	conf->baud_h = mcp->baud_h;
	conf->baud_l = mcp->baud_l;
	conf->config_alt_options = mcp->config_alt_options;
	conf->io_default_val_bmap = mcp->gpio_reset_val;
	/* Adjust alt. func if necessary */
	if (alt_pin_conf[gpio_nr])
		config_alt_pins &= ~(1 << alt_pin_conf[gpio_nr]);
	conf->config_alt_pins = config_alt_pins;

	status = hid_hw_output_report(mcp->hdev, (u8 *) conf,
				      sizeof(struct mcp_set_clear_outputs));

	if (status == sizeof(struct mcp_set_clear_outputs)) {
		mcp->gpio_dir = conf->io_bmap;
		mcp->config_alt_pins = config_alt_pins;
	} else {
		mutex_unlock(&mcp->lock);
		return -EIO;
	}

	mutex_unlock(&mcp->lock);

	/* Configure CMD will clear all IOs -> rewrite them */
	mcp_set_multiple(gc, &mask, &bits);
	return 0;
}

static int mcp_direction_input(struct gpio_chip *gc, unsigned int gpio_nr)
{
	return mcp_set_direction(gc, gpio_nr, MCP2200_DIR_IN);
}

static int mcp_direction_output(struct gpio_chip *gc, unsigned int gpio_nr,
				int value)
{
	int ret;
	unsigned long mask, bmap_value;

	mask = 1 << gpio_nr;
	bmap_value = value << gpio_nr;

	ret = mcp_set_direction(gc, gpio_nr, MCP2200_DIR_OUT);
	if (!ret)
		mcp_set_multiple(gc, &mask, &bmap_value);
	return ret;
}

static const struct gpio_chip template_chip = {
	.label			= "mcp2200",
	.owner			= THIS_MODULE,
	.get_direction		= mcp_get_direction,
	.direction_input	= mcp_direction_input,
	.direction_output	= mcp_direction_output,
	.set			= mcp_set,
	.set_multiple		= mcp_set_multiple,
	.get			= mcp_get,
	.get_multiple		= mcp_get_multiple,
	.base			= -1,
	.ngpio			= MCP_NGPIO,
	.can_sleep		= true,
};

/*
 * MCP2200 uses interrupt endpoint for input reports. This function
 * is called by HID layer when it receives i/p report from mcp2200,
 * which is actually a response to the previously sent command.
 */
static int mcp2200_raw_event(struct hid_device *hdev, struct hid_report *report,
			     u8 *data, int size)
{
	struct mcp2200 *mcp = hid_get_drvdata(hdev);
	struct mcp_read_all_resp *all_resp;

	switch (data[0]) {
	case READ_ALL:
		all_resp = (struct mcp_read_all_resp *) data;
		mcp->status = 0;
		mcp->gpio_inval = all_resp->io_port_val_bmap;
		mcp->baud_h = all_resp->baud_h;
		mcp->baud_l = all_resp->baud_l;
		mcp->gpio_reset_val = all_resp->io_default_val_bmap;
		mcp->config_alt_pins = all_resp->config_alt_pins;
		mcp->config_alt_options = all_resp->config_alt_options;
		break;
	default:
		mcp->status = -EIO;
		break;
	}

	complete(&mcp->wait_in_report);
	return 0;
}

static int mcp2200_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
	int ret;
	struct mcp2200 *mcp;

	mcp = devm_kzalloc(&hdev->dev, sizeof(*mcp), GFP_KERNEL);
	if (!mcp)
		return -ENOMEM;

	ret = hid_parse(hdev);
	if (ret) {
		hid_err(hdev, "can't parse reports\n");
		return ret;
	}

	ret = hid_hw_start(hdev, 0);
	if (ret) {
		hid_err(hdev, "can't start hardware\n");
		return ret;
	}

	hid_info(hdev, "USB HID v%x.%02x Device [%s] on %s\n", hdev->version >> 8,
		 hdev->version & 0xff, hdev->name, hdev->phys);

	ret = hid_hw_open(hdev);
	if (ret) {
		hid_err(hdev, "can't open device\n");
		hid_hw_stop(hdev);
		return ret;
	}

	mutex_init(&mcp->lock);
	init_completion(&mcp->wait_in_report);
	hid_set_drvdata(hdev, mcp);
	mcp->hdev = hdev;

	mcp->gc = template_chip;
	mcp->gc.parent = &hdev->dev;

	ret = devm_gpiochip_add_data(&hdev->dev, &mcp->gc, mcp);
	if (ret < 0) {
		hid_err(hdev, "Unable to register gpiochip\n");
		hid_hw_close(hdev);
		hid_hw_stop(hdev);
		return ret;
	}

	return 0;
}

static void mcp2200_remove(struct hid_device *hdev)
{
	hid_hw_close(hdev);
	hid_hw_stop(hdev);
}

static const struct hid_device_id mcp2200_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_MCP2200) },
	{ }
};
MODULE_DEVICE_TABLE(hid, mcp2200_devices);

static struct hid_driver mcp2200_driver = {
	.name		= "mcp2200",
	.id_table	= mcp2200_devices,
	.probe		= mcp2200_probe,
	.remove		= mcp2200_remove,
	.raw_event	= mcp2200_raw_event,
};

/* Register with HID core */
module_hid_driver(mcp2200_driver);

MODULE_AUTHOR("Johannes Roith <johannes@gnu-linux.rocks>");
MODULE_DESCRIPTION("MCP2200 Microchip HID USB to GPIO bridge");
MODULE_LICENSE("GPL");
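The new driver talks to the device with a simple request/response handshake: every command goes out as a raw output report, and mcp2200_raw_event() parses the matching input report and signals a completion. The core of that handshake, reduced to a sketch of what mcp_cmd_read_all() above does:

	/* Sketch of the handshake: the waiter must reinit the completion
	 * before sending, and the raw_event() handler calls complete()
	 * once the reply arrives on the interrupt endpoint. */
	reinit_completion(&mcp->wait_in_report);
	len = hid_hw_output_report(mcp->hdev, buf, buf_len);
	if (len != buf_len)
		return -EINVAL;
	if (!wait_for_completion_timeout(&mcp->wait_in_report,
					 msecs_to_jiffies(4000)))
		return -ETIMEDOUT;
	return mcp->status;	/* filled in by mcp2200_raw_event() */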
@@ -1447,7 +1447,8 @@ static __u8 *mt_report_fixup(struct hid_device *hdev, __u8 *rdesc,
{
	if (hdev->vendor == I2C_VENDOR_ID_GOODIX &&
	    (hdev->product == I2C_DEVICE_ID_GOODIX_01E8 ||
-	     hdev->product == I2C_DEVICE_ID_GOODIX_01E9)) {
+	     hdev->product == I2C_DEVICE_ID_GOODIX_01E9 ||
+	     hdev->product == I2C_DEVICE_ID_GOODIX_01E0)) {
		if (rdesc[607] == 0x15) {
			rdesc[607] = 0x25;
			dev_info(

@@ -2068,7 +2069,10 @@ static const struct hid_device_id mt_devices[] = {
			I2C_DEVICE_ID_GOODIX_01E8) },
	{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
	  HID_DEVICE(BUS_I2C, HID_GROUP_ANY, I2C_VENDOR_ID_GOODIX,
-			I2C_DEVICE_ID_GOODIX_01E8) },
+			I2C_DEVICE_ID_GOODIX_01E9) },
+	{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
+	  HID_DEVICE(BUS_I2C, HID_GROUP_ANY, I2C_VENDOR_ID_GOODIX,
+			I2C_DEVICE_ID_GOODIX_01E0) },

	/* GoodTouch panels */
	{ .driver_data = MT_CLS_NSMU,
@@ -38,8 +38,10 @@
	(usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER)

#define PLT_QUIRK_DOUBLE_VOLUME_KEYS BIT(0)
+#define PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS BIT(1)

#define PLT_DOUBLE_KEY_TIMEOUT 5 /* ms */
+#define PLT_FOLLOWED_OPPOSITE_KEY_TIMEOUT 220 /* ms */

struct plt_drv_data {
	unsigned long device_type;

@@ -137,6 +139,21 @@ static int plantronics_event(struct hid_device *hdev, struct hid_field *field,

		drv_data->last_volume_key_ts = cur_ts;
	}
+	if (drv_data->quirks & PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS) {
+		unsigned long prev_ts, cur_ts;
+
+		/* Usages are filtered in plantronics_usages. */
+
+		if (!value) /* Handle key presses only. */
+			return 0;
+
+		prev_ts = drv_data->last_volume_key_ts;
+		cur_ts = jiffies;
+		if (jiffies_to_msecs(cur_ts - prev_ts) <= PLT_FOLLOWED_OPPOSITE_KEY_TIMEOUT)
+			return 1; /* Ignore the followed opposite volume key. */
+
+		drv_data->last_volume_key_ts = cur_ts;
+	}

	return 0;
}

@@ -210,6 +227,12 @@ static const struct hid_device_id plantronics_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
					 USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3225_SERIES),
		.driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
+					 USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3325_SERIES),
+		.driver_data = PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
+					 USB_DEVICE_ID_PLANTRONICS_ENCOREPRO_500_SERIES),
+		.driver_data = PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS },
	{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
	{ }
};
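The new quirk reuses the timestamp field from the double-key workaround: record when the last volume key was seen and swallow the opposite key if it follows within the 220 ms window. Stripped to its core, the jiffies-based debounce looks like this (a sketch, not the driver code verbatim):

	/* Debounce sketch: consume a key press landing inside the window,
	 * otherwise refresh the timestamp and let it through. */
	if (value && jiffies_to_msecs(jiffies - drv_data->last_volume_key_ts)
		     <= PLT_FOLLOWED_OPPOSITE_KEY_TIMEOUT)
		return 1;	/* eat the ghost key */
	drv_data->last_volume_key_ts = jiffies;
	return 0;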
@@ -44,12 +44,12 @@
#include "i2c-hid.h"

/* quirks to control the device */
-#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV	BIT(0)
-#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET	BIT(1)
-#define I2C_HID_QUIRK_BOGUS_IRQ			BIT(4)
-#define I2C_HID_QUIRK_RESET_ON_RESUME		BIT(5)
-#define I2C_HID_QUIRK_BAD_INPUT_SIZE		BIT(6)
-#define I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET	BIT(7)
+#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET	BIT(0)
+#define I2C_HID_QUIRK_BOGUS_IRQ			BIT(1)
+#define I2C_HID_QUIRK_RESET_ON_RESUME		BIT(2)
+#define I2C_HID_QUIRK_BAD_INPUT_SIZE		BIT(3)
+#define I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET	BIT(4)
+#define I2C_HID_QUIRK_NO_SLEEP_ON_SUSPEND	BIT(5)

/* Command opcodes */
#define I2C_HID_OPCODE_RESET			0x01

@@ -119,8 +119,6 @@ static const struct i2c_hid_quirks {
	__u16 idProduct;
	__u32 quirks;
} i2c_hid_quirks[] = {
-	{ USB_VENDOR_ID_WEIDA, HID_ANY_ID,
-		I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
	{ I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
		I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
	{ I2C_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_VOYO_WINPAD_A15,

@@ -133,6 +131,8 @@ static const struct i2c_hid_quirks {
		I2C_HID_QUIRK_RESET_ON_RESUME },
	{ USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720,
		I2C_HID_QUIRK_BAD_INPUT_SIZE },
+	{ I2C_VENDOR_ID_CIRQUE, I2C_PRODUCT_ID_CIRQUE_1063,
+		I2C_HID_QUIRK_NO_SLEEP_ON_SUSPEND },
	/*
	 * Sending the wakeup after reset actually breaks ELAN touchscreen controller
	 */

@@ -389,8 +389,7 @@ static int i2c_hid_set_power(struct i2c_hid *ihid, int power_state)
	 * The call will get a return value (EREMOTEIO) but device will be
	 * triggered and activated. After that, it goes like a normal device.
	 */
-	if (power_state == I2C_HID_PWR_ON &&
-	    ihid->quirks & I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV) {
+	if (power_state == I2C_HID_PWR_ON) {
		ret = i2c_hid_set_power_command(ihid, I2C_HID_PWR_ON);

		/* Device was already activated */

@@ -949,7 +948,8 @@ static int i2c_hid_core_suspend(struct i2c_hid *ihid, bool force_poweroff)
		return ret;

	/* Save some power */
-	i2c_hid_set_power(ihid, I2C_HID_PWR_SLEEP);
+	if (!(ihid->quirks & I2C_HID_QUIRK_NO_SLEEP_ON_SUSPEND))
+		i2c_hid_set_power(ihid, I2C_HID_PWR_SLEEP);

	disable_irq(client->irq);
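With the WEIDA entry gone, the wakeup-on-power-on behaviour becomes unconditional in i2c_hid_set_power(), and the remaining quirk bits can be renumbered freely: the values never leave the driver, they are only matched per VID/PID at probe time and then tested as mask bits. Roughly (a sketch of the lookup shape, not the exact helper):

	/* Sketch: quirks are looked up once by VID/PID ... */
	for (n = 0; i2c_hid_quirks[n].idVendor; n++)
		if (i2c_hid_quirks[n].idVendor == idVendor &&
		    (i2c_hid_quirks[n].idProduct == (__u16)HID_ANY_ID ||
		     i2c_hid_quirks[n].idProduct == idProduct))
			quirks = i2c_hid_quirks[n].quirks;

	/* ... and tested as plain mask bits afterwards. */
	if (!(ihid->quirks & I2C_HID_QUIRK_NO_SLEEP_ON_SUSPEND))
		i2c_hid_set_power(ihid, I2C_HID_PWR_SLEEP);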
@@ -635,7 +635,7 @@ static int ish_fw_xfer_direct_dma(struct ishtp_cl_data *client_data,
				  const struct firmware *fw,
				  const struct shim_fw_info fw_info)
{
-	int rv;
+	int rv = 0;
	void *dma_buf;
	dma_addr_t dma_buf_phy;
	u32 fragment_offset, fragment_size, payload_max_size;
@@ -174,6 +174,7 @@ config SENSORS_ADM9240
	tristate "Analog Devices ADM9240 and compatibles"
	depends on I2C
	select HWMON_VID
+	select REGMAP_I2C
	help
	  If you say yes here you get support for Analog Devices ADM9240,
	  Dallas DS1780, National Semiconductor LM81 sensor chips.

@@ -235,6 +236,7 @@ config SENSORS_ADT7462
config SENSORS_ADT7470
	tristate "Analog Devices ADT7470"
	depends on I2C
+	select REGMAP_I2C
	help
	  If you say yes here you get support for the Analog Devices
	  ADT7470 temperature monitoring chips.

@@ -1200,6 +1202,7 @@ config SENSORS_MAX31790
config SENSORS_MC34VR500
	tristate "NXP MC34VR500 hardware monitoring driver"
	depends on I2C
+	select REGMAP_I2C
	help
	  If you say yes here you get support for the temperature and input
	  voltage sensors of the NXP MC34VR500.

@@ -2137,6 +2140,7 @@ config SENSORS_TMP464
config SENSORS_TMP513
	tristate "Texas Instruments TMP513 and compatibles"
	depends on I2C
+	select REGMAP_I2C
	help
	  If you say yes here you get support for Texas Instruments TMP512,
	  and TMP513 temperature and power supply sensor chips.
@@ -358,7 +358,7 @@ static const struct m10bmc_sdata n6000bmc_temp_tbl[] = {
	{ 0x4f0, 0x4f4, 0x4f8, 0x52c, 0x0, 500, "Board Top Near FPGA Temperature" },
	{ 0x4fc, 0x500, 0x504, 0x52c, 0x0, 500, "Board Bottom Near CVL Temperature" },
	{ 0x508, 0x50c, 0x510, 0x52c, 0x0, 500, "Board Top East Near VRs Temperature" },
-	{ 0x514, 0x518, 0x51c, 0x52c, 0x0, 500, "Columbiaville Die Temperature" },
+	{ 0x514, 0x518, 0x51c, 0x52c, 0x0, 500, "CVL Die Temperature" },
	{ 0x520, 0x524, 0x528, 0x52c, 0x0, 500, "Board Rear Side Temperature" },
	{ 0x530, 0x534, 0x538, 0x52c, 0x0, 500, "Board Front Side Temperature" },
	{ 0x53c, 0x540, 0x544, 0x0, 0x0, 500, "QSFP1 Case Temperature" },
@@ -627,6 +627,7 @@ static const struct pci_device_id k10temp_id_table[] = {
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) },
+	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M60H_DF_F3) },
	{ PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{ PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
	{ PCI_VDEVICE(HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3) },
@@ -1754,8 +1754,15 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)

	i801_add_tco(priv);

+	/*
+	 * adapter.name is used by platform code to find the main I801 adapter
+	 * to instantiate i2c_clients, do not change.
+	 */
	snprintf(priv->adapter.name, sizeof(priv->adapter.name),
-		 "SMBus I801 adapter at %04lx", priv->smba);
+		 "SMBus %s adapter at %04lx",
+		 (priv->features & FEATURE_IDF) ? "I801 IDF" : "I801",
+		 priv->smba);

	err = i2c_add_adapter(&priv->adapter);
	if (err) {
		platform_device_unregister(priv->tco_pdev);
@@ -1666,6 +1666,7 @@ static void cdns_i3c_master_remove(struct platform_device *pdev)
{
	struct cdns_i3c_master *master = platform_get_drvdata(pdev);

+	cancel_work_sync(&master->hj_work);
	i3c_master_unregister(&master->base);

	clk_disable_unprepare(master->sysclk);
@@ -2616,14 +2616,16 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)

static void timeout_sends(struct work_struct *work)
{
+	struct ib_mad_send_wr_private *mad_send_wr, *n;
	struct ib_mad_agent_private *mad_agent_priv;
-	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
+	struct list_head local_list;
	unsigned long flags, delay;

	mad_agent_priv = container_of(work, struct ib_mad_agent_private,
				      timed_work.work);
	mad_send_wc.vendor_err = 0;
+	INIT_LIST_HEAD(&local_list);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->wait_list)) {

@@ -2641,13 +2643,16 @@ static void timeout_sends(struct work_struct *work)
			break;
		}

-		list_del(&mad_send_wr->agent_list);
+		list_del_init(&mad_send_wr->agent_list);
		if (mad_send_wr->status == IB_WC_SUCCESS &&
		    !retry_send(mad_send_wr))
			continue;

-		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+		list_add_tail(&mad_send_wr->agent_list, &local_list);
+	}
+	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

+	list_for_each_entry_safe(mad_send_wr, n, &local_list, agent_list) {
		if (mad_send_wr->status == IB_WC_SUCCESS)
			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
		else

@@ -2655,11 +2660,8 @@ static void timeout_sends(struct work_struct *work)
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);
-
		deref_mad_agent(mad_agent_priv);
-		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
-	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

/*
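The timeout_sends() rework is the classic detach-then-process pattern: entries are moved to a local list while the agent lock is held, and the send handlers run afterwards without re-taking the lock per entry. In generic form (names are placeholders, not the mad.c code):

	/* Sketch: splice work out under the lock, then invoke callbacks
	 * lock-free on the private list; _safe iteration because the
	 * handler may free the entry. */
	LIST_HEAD(local_list);

	spin_lock_irqsave(&priv->lock, flags);
	list_for_each_entry_safe(e, n, &priv->wait_list, list)
		list_move_tail(&e->list, &local_list);
	spin_unlock_irqrestore(&priv->lock, flags);

	list_for_each_entry_safe(e, n, &local_list, list)
		handle_timeout(e);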
@@ -733,24 +733,31 @@ static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt,
 * >0: Number of pages mapped
 */
static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
-			u32 *bytes_mapped, u32 flags)
+			u32 *bytes_mapped, u32 flags, bool permissive_fault)
{
	struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);

-	if (unlikely(io_virt < mr->ibmr.iova))
+	if (unlikely(io_virt < mr->ibmr.iova) && !permissive_fault)
		return -EFAULT;

	if (mr->umem->is_dmabuf)
		return pagefault_dmabuf_mr(mr, bcnt, bytes_mapped, flags);

	if (!odp->is_implicit_odp) {
+		u64 offset = io_virt < mr->ibmr.iova ? 0 : io_virt - mr->ibmr.iova;
		u64 user_va;

-		if (check_add_overflow(io_virt - mr->ibmr.iova,
-				       (u64)odp->umem.address, &user_va))
+		if (check_add_overflow(offset, (u64)odp->umem.address,
+				       &user_va))
			return -EFAULT;
-		if (unlikely(user_va >= ib_umem_end(odp) ||
-			     ib_umem_end(odp) - user_va < bcnt))
+
+		if (permissive_fault) {
+			if (user_va < ib_umem_start(odp))
+				user_va = ib_umem_start(odp);
+			if ((user_va + bcnt) > ib_umem_end(odp))
+				bcnt = ib_umem_end(odp) - user_va;
+		} else if (unlikely(user_va >= ib_umem_end(odp) ||
+				    ib_umem_end(odp) - user_va < bcnt))
			return -EFAULT;
		return pagefault_real_mr(mr, odp, user_va, bcnt, bytes_mapped,
					 flags);

@@ -857,7 +864,7 @@ next_mr:
	case MLX5_MKEY_MR:
		mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);

-		ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0);
+		ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0, false);
		if (ret < 0)
			goto end;

@@ -1710,7 +1717,7 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
	for (i = 0; i < work->num_sge; ++i) {
		ret = pagefault_mr(work->frags[i].mr, work->frags[i].io_virt,
				   work->frags[i].length, &bytes_mapped,
-				   work->pf_flags);
+				   work->pf_flags, false);
		if (ret <= 0)
			continue;
		mlx5_update_odp_stats(work->frags[i].mr, prefetch, ret);

@@ -1761,7 +1768,7 @@ static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
		if (IS_ERR(mr))
			return PTR_ERR(mr);
		ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length,
-				   &bytes_mapped, pf_flags);
+				   &bytes_mapped, pf_flags, false);
		if (ret < 0) {
			mlx5r_deref_odp_mkey(&mr->mmkey);
			return ret;
@@ -931,12 +931,11 @@ static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
	if (err)
		goto close;

out:
	rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1);
	return;
close:
+	rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1);
	close_path(srv_path);
-	goto out;
}

static int post_recv_info_req(struct rtrs_srv_con *con)

@@ -987,6 +986,16 @@ static int post_recv_path(struct rtrs_srv_path *srv_path)
		q_size = SERVICE_CON_QUEUE_DEPTH;
	else
		q_size = srv->queue_depth;
+	if (srv_path->state != RTRS_SRV_CONNECTING) {
+		rtrs_err(s, "Path state invalid. state %s\n",
+			 rtrs_srv_state_str(srv_path->state));
+		return -EIO;
+	}
+
+	if (!srv_path->s.con[cid]) {
+		rtrs_err(s, "Conn not set for %d\n", cid);
+		return -EIO;
+	}

	err = post_recv_io(to_srv_con(srv_path->s.con[cid]), q_size);
	if (err) {
@@ -978,12 +978,12 @@ static int rmi_driver_remove(struct device *dev)

	rmi_disable_irq(rmi_dev, false);

-	irq_domain_remove(data->irqdomain);
-	data->irqdomain = NULL;
-
	rmi_f34_remove_sysfs(rmi_dev);
	rmi_free_function_list(rmi_dev);

+	irq_domain_remove(data->irqdomain);
+	data->irqdomain = NULL;
+
	return 0;
}
@@ -302,6 +302,10 @@ static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
	p->mem_priv = NULL;
	p->dbuf = NULL;
	p->dbuf_mapped = 0;
+	p->bytesused = 0;
+	p->length = 0;
+	p->m.fd = 0;
+	p->data_offset = 0;
}

/*

@@ -1296,10 +1300,6 @@ static int __prepare_dmabuf(struct vb2_buffer *vb)

		/* Release previously acquired memory if present */
		__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
-		vb->planes[plane].bytesused = 0;
-		vb->planes[plane].length = 0;
-		vb->planes[plane].m.fd = 0;
-		vb->planes[plane].data_offset = 0;

		/* Acquire each plane's memory */
		mem_priv = call_ptr_memop(attach_dmabuf,
@@ -178,7 +178,6 @@ static const struct dmi_system_id cht_wc_model_dmi_ids[] = {
		.driver_data = (void *)(long)INTEL_CHT_WC_LENOVO_YT3_X90,
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
		},
	},
@@ -27,6 +27,7 @@
#include <linux/phylink.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
#include <net/dsa.h>

#include "b53_regs.h"

@@ -224,6 +225,9 @@ static const struct b53_mib_desc b53_mibs_58xx[] = {

#define B53_MIBS_58XX_SIZE	ARRAY_SIZE(b53_mibs_58xx)

+#define B53_MAX_MTU_25		(1536 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN)
+#define B53_MAX_MTU		(9720 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN)
+
static int b53_do_vlan_op(struct b53_device *dev, u8 op)
{
	unsigned int i;

@@ -2263,20 +2267,25 @@ static int b53_change_mtu(struct dsa_switch *ds, int port, int mtu)
	bool allow_10_100;

	if (is5325(dev) || is5365(dev))
-		return -EOPNOTSUPP;
+		return 0;

	if (!dsa_is_cpu_port(ds, port))
		return 0;

-	enable_jumbo = (mtu >= JMS_MIN_SIZE);
-	allow_10_100 = (dev->chip_id == BCM583XX_DEVICE_ID);
+	enable_jumbo = (mtu > ETH_DATA_LEN);
+	allow_10_100 = !is63xx(dev);

	return b53_set_jumbo(dev, enable_jumbo, allow_10_100);
}

static int b53_get_max_mtu(struct dsa_switch *ds, int port)
{
-	return JMS_MAX_SIZE;
+	struct b53_device *dev = ds->priv;
+
+	if (is5325(dev) || is5365(dev))
+		return B53_MAX_MTU_25;
+
+	return B53_MAX_MTU;
}

static const struct dsa_switch_ops b53_switch_ops = {
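The two new macros turn a chip's maximum frame size into the L3 payload DSA may advertise: 9720 - 14 (ETH_HLEN) - 4 (VLAN_HLEN) - 4 (ETH_FCS_LEN) = 9698 bytes, and 1536 - 22 = 1514 for the older 5325/5365 parts. A compile-time check of that arithmetic, as a sketch:

	#include <linux/if_ether.h>
	#include <linux/if_vlan.h>

	/* Sanity sketch for the header math behind B53_MAX_MTU(_25). */
	static_assert(9720 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN == 9698);
	static_assert(1536 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN == 1514);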
@@ -6,6 +6,7 @@
#include <linux/module.h>
#include <linux/gpio/consumer.h>
#include <linux/regmap.h>
+#include <linux/iopoll.h>
#include <linux/mutex.h>
#include <linux/mii.h>
#include <linux/of.h>

@@ -839,6 +840,8 @@ static void lan9303_handle_reset(struct lan9303 *chip)
	if (!chip->reset_gpio)
		return;

+	gpiod_set_value_cansleep(chip->reset_gpio, 1);
+
	if (chip->reset_duration != 0)
		msleep(chip->reset_duration);

@@ -864,8 +867,34 @@ static int lan9303_disable_processing(struct lan9303 *chip)
static int lan9303_check_device(struct lan9303 *chip)
{
	int ret;
+	int err;
	u32 reg;

+	/* In I2C-managed configurations this polling loop will clash with
+	 * switch's reading of EEPROM right after reset and this behaviour is
+	 * not configurable. While lan9303_read() already has quite long retry
+	 * timeout, seems not all cases are being detected as arbitration error.
+	 *
+	 * According to datasheet, EEPROM loader has 30ms timeout (in case of
+	 * missing EEPROM).
+	 *
+	 * Loading of the largest supported EEPROM is expected to take at least
+	 * 5.9s.
+	 */
+	err = read_poll_timeout(lan9303_read, ret,
+				!ret && reg & LAN9303_HW_CFG_READY,
+				20000, 6000000, false,
+				chip->regmap, LAN9303_HW_CFG, &reg);
+	if (ret) {
+		dev_err(chip->dev, "failed to read HW_CFG reg: %pe\n",
+			ERR_PTR(ret));
+		return ret;
+	}
+	if (err) {
+		dev_err(chip->dev, "HW_CFG not ready: 0x%08x\n", reg);
+		return err;
+	}
+
	ret = lan9303_read(chip->regmap, LAN9303_CHIP_REV, &reg);
	if (ret) {
		dev_err(chip->dev, "failed to read chip revision register: %d\n",
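read_poll_timeout() from linux/iopoll.h wraps the whole poll loop: read_poll_timeout(op, val, cond, sleep_us, timeout_us, sleep_before_read, args...) evaluates val = op(args...) every sleep_us microseconds until cond becomes true or timeout_us elapses, returning 0 on success and -ETIMEDOUT otherwise. A reduced sketch of the call above (generic names):

	#include <linux/iopoll.h>

	/* Poll an accessor every 20 ms for up to 6 s; 'ret' receives the
	 * accessor's own return code, 'err' the poll status. */
	err = read_poll_timeout(my_read, ret, !ret && (reg & READY_BIT),
				20000, 6000000, false,
				map, REG_ADDR, &reg);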
@@ -318,11 +318,11 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv)
	 * from the ADIN1110 frame header.
	 */
	if (frame_size < ADIN1110_FRAME_HEADER_LEN + ADIN1110_FEC_LEN)
-		return ret;
+		return -EINVAL;

	round_len = adin1110_round_len(frame_size);
	if (round_len < 0)
-		return ret;
+		return -EINVAL;

	frame_size_no_fcs = frame_size - ADIN1110_FRAME_HEADER_LEN - ADIN1110_FEC_LEN;
	memset(priv->data, 0, ADIN1110_RD_HEADER_LEN);
@@ -451,6 +451,9 @@ static void pdsc_remove(struct pci_dev *pdev)

static void pdsc_stop_health_thread(struct pdsc *pdsc)
{
+	if (pdsc->pdev->is_virtfn)
+		return;
+
	timer_shutdown_sync(&pdsc->wdtimer);
	if (pdsc->health_work.func)
		cancel_work_sync(&pdsc->health_work);

@@ -458,6 +461,9 @@ static void pdsc_stop_health_thread(struct pdsc *pdsc)

static void pdsc_restart_health_thread(struct pdsc *pdsc)
{
+	if (pdsc->pdev->is_virtfn)
+		return;
+
	timer_setup(&pdsc->wdtimer, pdsc_wdtimer_cb, 0);
	mod_timer(&pdsc->wdtimer, jiffies + 1);
}
@@ -79,8 +79,8 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
#define GMAC0_IRQ4_8 (GMAC0_MIB_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT)

#define GMAC_OFFLOAD_FEATURES (NETIF_F_SG | NETIF_F_IP_CSUM | \
-		NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | \
-		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
+			       NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | \
+			       NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)

/**
 * struct gmac_queue_page - page buffer per-page info

@@ -1148,24 +1148,26 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
	struct gmac_txdesc *txd;
	skb_frag_t *skb_frag;
	dma_addr_t mapping;
-	unsigned short mtu;
	void *buffer;
+	u16 mss;
	int ret;

-	mtu = ETH_HLEN;
-	mtu += netdev->mtu;
-	if (skb->protocol == htons(ETH_P_8021Q))
-		mtu += VLAN_HLEN;
-
	word1 = skb->len;
	word3 = SOF_BIT;

-	if (word1 > mtu) {
-		mss = skb_shinfo(skb)->gso_size;
-		if (mss) {
+	mss = skb_shinfo(skb)->gso_size;
+	if (mss) {
		/* This means we are dealing with TCP and skb->len is the
		 * sum total of all the segments. The TSO will deal with
		 * chopping this up for us.
		 */
		/* The accelerator needs the full frame size here */
		mss += skb_tcp_all_headers(skb);
		netdev_dbg(netdev, "segment offloading mss = %04x len=%04x\n",
			   mss, skb->len);
		word1 |= TSS_MTU_ENABLE_BIT;
-		word3 |= mtu;
-	}
-
-	if (skb->len >= ETH_FRAME_LEN) {
+		word3 |= mss;
+	} else if (skb->len >= ETH_FRAME_LEN) {
		/* Hardware offloaded checksumming isn't working on frames
		 * bigger than 1514 bytes. A hypothesis about this is that the
		 * checksum buffer is only 1518 bytes, so when the frames get

@@ -1180,7 +1182,9 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
			return ret;
		}
		word1 |= TSS_BYPASS_BIT;
-	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+	}
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int tcp = 0;

		/* We do not switch off the checksumming on non TCP/UDP
@@ -1058,7 +1058,8 @@ fec_restart(struct net_device *ndev)
	u32 rcntl = OPT_FRAME_SIZE | 0x04;
	u32 ecntl = FEC_ECR_ETHEREN;

-	fec_ptp_save_state(fep);
+	if (fep->bufdesc_ex)
+		fec_ptp_save_state(fep);

	/* Whack a reset. We should wait for this.
	 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC

@@ -1321,7 +1322,8 @@ fec_stop(struct net_device *ndev)
		netdev_err(ndev, "Graceful transmit stop did not complete!\n");
	}

-	fec_ptp_save_state(fep);
+	if (fep->bufdesc_ex)
+		fec_ptp_save_state(fep);

	/* Whack a reset. We should wait for this.
	 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
@@ -578,7 +578,7 @@ static int mal_probe(struct platform_device *ofdev)
		printk(KERN_ERR "%pOF: Support for 405EZ not enabled!\n",
		       ofdev->dev.of_node);
		err = -ENODEV;
-		goto fail;
+		goto fail_unmap;
#endif
	}
@@ -108,8 +108,8 @@ struct e1000_hw;
#define E1000_DEV_ID_PCH_RPL_I219_V22		0x0DC8
#define E1000_DEV_ID_PCH_MTP_I219_LM18		0x550A
#define E1000_DEV_ID_PCH_MTP_I219_V18		0x550B
-#define E1000_DEV_ID_PCH_MTP_I219_LM19		0x550C
-#define E1000_DEV_ID_PCH_MTP_I219_V19		0x550D
+#define E1000_DEV_ID_PCH_ADP_I219_LM19		0x550C
+#define E1000_DEV_ID_PCH_ADP_I219_V19		0x550D
#define E1000_DEV_ID_PCH_LNP_I219_LM20		0x550E
#define E1000_DEV_ID_PCH_LNP_I219_V20		0x550F
#define E1000_DEV_ID_PCH_LNP_I219_LM21		0x5510
@@ -1108,6 +1108,46 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
	return 0;
}

+/**
+ * e1000e_force_smbus - Force interfaces to transition to SMBUS mode.
+ * @hw: pointer to the HW structure
+ *
+ * Force the MAC and the PHY to SMBUS mode. Assumes semaphore already
+ * acquired.
+ *
+ * Return: 0 on success, negative errno on failure.
+ **/
+static s32 e1000e_force_smbus(struct e1000_hw *hw)
+{
+	u16 smb_ctrl = 0;
+	u32 ctrl_ext;
+	s32 ret_val;
+
+	/* Switching PHY interface always returns MDI error
+	 * so disable retry mechanism to avoid wasting time
+	 */
+	e1000e_disable_phy_retry(hw);
+
+	/* Force SMBus mode in the PHY */
+	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &smb_ctrl);
+	if (ret_val) {
+		e1000e_enable_phy_retry(hw);
+		return ret_val;
+	}
+
+	smb_ctrl |= CV_SMB_CTRL_FORCE_SMBUS;
+	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, smb_ctrl);
+
+	e1000e_enable_phy_retry(hw);
+
+	/* Force SMBus mode in the MAC */
+	ctrl_ext = er32(CTRL_EXT);
+	ctrl_ext |= E1000_CTRL_EXT_FORCE_SMBUS;
+	ew32(CTRL_EXT, ctrl_ext);
+
+	return 0;
+}
+
/**
 * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
 * @hw: pointer to the HW structure

@@ -1165,6 +1205,14 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
	if (ret_val)
		goto out;

+	if (hw->mac.type != e1000_pch_mtp) {
+		ret_val = e1000e_force_smbus(hw);
+		if (ret_val) {
+			e_dbg("Failed to force SMBUS: %d\n", ret_val);
+			goto release;
+		}
+	}
+
	/* Si workaround for ULP entry flow on i127/rev6 h/w. Enable
	 * LPLU and disable Gig speed when entering ULP
	 */

@@ -1225,6 +1273,13 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
	}

release:
+	if (hw->mac.type == e1000_pch_mtp) {
+		ret_val = e1000e_force_smbus(hw);
+		if (ret_val)
+			e_dbg("Failed to force SMBUS over MTL system: %d\n",
+			      ret_val);
+	}
+
	hw->phy.ops.release(hw);
out:
	if (ret_val)
@@ -6623,7 +6623,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status, wufc;
	int retval = 0;
-	u16 smb_ctrl;

	/* Runtime suspend should only enable wakeup for link changes */
	if (runtime)

@@ -6701,23 +6700,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
				goto skip_phy_configurations;
			}
		}
-
-		/* Force SMBUS to allow WOL */
-		/* Switching PHY interface always returns MDI error
-		 * so disable retry mechanism to avoid wasting time
-		 */
-		e1000e_disable_phy_retry(hw);
-
-		e1e_rphy(hw, CV_SMB_CTRL, &smb_ctrl);
-		smb_ctrl |= CV_SMB_CTRL_FORCE_SMBUS;
-		e1e_wphy(hw, CV_SMB_CTRL, smb_ctrl);
-
-		e1000e_enable_phy_retry(hw);
-
-		/* Force SMBus mode in MAC */
-		ctrl_ext = er32(CTRL_EXT);
-		ctrl_ext |= E1000_CTRL_EXT_FORCE_SMBUS;
-		ew32(CTRL_EXT, ctrl_ext);
	}

	/* Ensure that the appropriate bits are set in LPI_CTRL

@@ -7917,7 +7899,10 @@ static const struct pci_device_id e1000_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_adp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_adp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_adp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM19), board_pch_adp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V19), board_pch_adp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_mtp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_mtp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_mtp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_mtp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_mtp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_mtp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_mtp },
@@ -5,6 +5,7 @@
#define _I40E_ADMINQ_CMD_H_

#include <linux/bits.h>
+#include <linux/types.h>

/* This header file defines the i40e Admin Queue commands and is shared between
 * i40e Firmware and Software.

@@ -4,6 +4,7 @@
#ifndef _I40E_DIAG_H_
#define _I40E_DIAG_H_

+#include <linux/types.h>
#include "i40e_adminq_cmd.h"

/* forward-declare the HW struct for the compiler */
@@ -1744,6 +1744,7 @@ struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
	struct hlist_node *h;
	int bkt;

+	lockdep_assert_held(&vsi->mac_filter_hash_lock);
	if (vsi->info.pvid)
		return i40e_add_filter(vsi, macaddr,
				       le16_to_cpu(vsi->info.pvid));
@@ -204,7 +204,7 @@
#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
#define I40E_GLGEN_MSCA_OPCODE_MASK(_i) I40E_MASK(_i, I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_GLGEN_MSCA_STCODE_SHIFT 28
-#define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_GLGEN_MSCA_STCODE_MASK(_i) I40E_MASK(_i, I40E_GLGEN_MSCA_STCODE_SHIFT)
#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31

@@ -40,11 +40,11 @@ typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
#define I40E_QTX_CTL_VM_QUEUE	0x1
#define I40E_QTX_CTL_PF_QUEUE	0x2

-#define I40E_MDIO_CLAUSE22_STCODE_MASK		I40E_GLGEN_MSCA_STCODE_MASK
+#define I40E_MDIO_CLAUSE22_STCODE_MASK		I40E_GLGEN_MSCA_STCODE_MASK(1)
#define I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK	I40E_GLGEN_MSCA_OPCODE_MASK(1)
#define I40E_MDIO_CLAUSE22_OPCODE_READ_MASK	I40E_GLGEN_MSCA_OPCODE_MASK(2)

-#define I40E_MDIO_CLAUSE45_STCODE_MASK		I40E_GLGEN_MSCA_STCODE_MASK
+#define I40E_MDIO_CLAUSE45_STCODE_MASK		I40E_GLGEN_MSCA_STCODE_MASK(0)
#define I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK	I40E_GLGEN_MSCA_OPCODE_MASK(0)
#define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK	I40E_GLGEN_MSCA_OPCODE_MASK(1)
#define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK	I40E_GLGEN_MSCA_OPCODE_MASK(3)
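Parameterizing the ST-code mirrors what was already done for the opcode field: clause 22 MDIO frames use ST = 1 while clause 45 uses ST = 0, so both can now be built from the same macro instead of a fixed 0x1 mask. Spelled out as a sketch (the I40E_MASK definition here is an assumption about its shape, not quoted from the driver):

	/* Sketch of the expansion: STCODE(1) -> 1 << 28 for clause 22,
	 * STCODE(0) -> 0 for clause 45. */
	#define I40E_MASK(mask, shift)	((u32)(mask) << (shift))
	#define STCODE(_i)		I40E_MASK(_i, 28)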
@@ -2219,8 +2219,10 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
	vfres->vsi_res[0].qset_handle
			  = le16_to_cpu(vsi->info.qs_handle[0]);
	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) && !vf->pf_set_mac) {
+		spin_lock_bh(&vsi->mac_filter_hash_lock);
		i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
		eth_zero_addr(vf->default_lan_addr.addr);
+		spin_unlock_bh(&vsi->mac_filter_hash_lock);
	}
	ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
			vf->default_lan_addr.addr);
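The spin_lock_bh() pair added here is exactly what the new lockdep_assert_held() in i40e_add_mac_filter() (a few hunks up) enforces: every manipulation of the MAC filter hash must run under mac_filter_hash_lock. The calling convention, in short:

	/* Sketch: filter add/del must hold the hash lock; lockdep now
	 * fires if a caller forgets. */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
	eth_zero_addr(vf->default_lan_addr.addr);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);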
@@ -518,7 +518,7 @@ enum ice_misc_thread_tasks {
	ICE_MISC_THREAD_NBITS		/* must be last */
};

-struct ice_switchdev_info {
+struct ice_eswitch {
	struct ice_vsi *control_vsi;
	struct ice_vsi *uplink_vsi;
	struct ice_esw_br_offloads *br_offloads;

@@ -631,7 +631,7 @@ struct ice_pf {
	struct ice_link_default_override_tlv link_dflt_override;
	struct ice_lag *lag; /* Link Aggregation information */

-	struct ice_switchdev_info switchdev;
+	struct ice_eswitch eswitch;
	struct ice_esw_br_port *br_port;

#define ICE_INVALID_AGG_NODE_ID 0

@@ -838,7 +838,7 @@ static inline struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
 */
static inline bool ice_is_switchdev_running(struct ice_pf *pf)
{
-	return pf->switchdev.is_running;
+	return pf->eswitch.is_running;
}

#define ICE_FD_STAT_CTR_BLOCK_COUNT	256
@@ -16,12 +16,12 @@
 * @vf: pointer to VF struct
 *
 * This function adds advanced rule that forwards packets with
- * VF's VSI index to the corresponding switchdev ctrl VSI queue.
+ * VF's VSI index to the corresponding eswitch ctrl VSI queue.
 */
static int
ice_eswitch_add_vf_sp_rule(struct ice_pf *pf, struct ice_vf *vf)
{
-	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+	struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
	struct ice_adv_rule_info rule_info = { 0 };
	struct ice_adv_lkup_elem *list;
	struct ice_hw *hw = &pf->hw;

@@ -59,7 +59,7 @@ ice_eswitch_add_vf_sp_rule(struct ice_pf *pf, struct ice_vf *vf)
 * @vf: pointer to the VF struct
 *
 * Delete the advanced rule that was used to forward packets with the VF's VSI
- * index to the corresponding switchdev ctrl VSI queue.
+ * index to the corresponding eswitch ctrl VSI queue.
 */
static void ice_eswitch_del_vf_sp_rule(struct ice_vf *vf)
{

@@ -70,7 +70,7 @@ static void ice_eswitch_del_vf_sp_rule(struct ice_vf *vf)
}

/**
- * ice_eswitch_setup_env - configure switchdev HW filters
+ * ice_eswitch_setup_env - configure eswitch HW filters
 * @pf: pointer to PF struct
 *
 * This function adds HW filters configuration specific for switchdev

@@ -78,18 +78,18 @@ static void ice_eswitch_del_vf_sp_rule(struct ice_vf *vf)
 */
static int ice_eswitch_setup_env(struct ice_pf *pf)
{
-	struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
-	struct net_device *uplink_netdev = uplink_vsi->netdev;
-	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+	struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
+	struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
+	struct net_device *netdev = uplink_vsi->netdev;
	struct ice_vsi_vlan_ops *vlan_ops;
	bool rule_added = false;

	ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);

-	netif_addr_lock_bh(uplink_netdev);
-	__dev_uc_unsync(uplink_netdev, NULL);
-	__dev_mc_unsync(uplink_netdev, NULL);
-	netif_addr_unlock_bh(uplink_netdev);
+	netif_addr_lock_bh(netdev);
+	__dev_uc_unsync(netdev, NULL);
+	__dev_mc_unsync(netdev, NULL);
+	netif_addr_unlock_bh(netdev);

	if (ice_vsi_add_vlan_zero(uplink_vsi))
		goto err_def_rx;

@@ -132,10 +132,10 @@ err_def_rx:
}

/**
- * ice_eswitch_remap_rings_to_vectors - reconfigure rings of switchdev ctrl VSI
+ * ice_eswitch_remap_rings_to_vectors - reconfigure rings of eswitch ctrl VSI
 * @pf: pointer to PF struct
 *
- * In switchdev number of allocated Tx/Rx rings is equal.
+ * In eswitch number of allocated Tx/Rx rings is equal.
 *
 * This function fills q_vectors structures associated with representor and
 * move each ring pairs to port representor netdevs. Each port representor

@@ -144,7 +144,7 @@ err_def_rx:
 */
static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
{
-	struct ice_vsi *vsi = pf->switchdev.control_vsi;
+	struct ice_vsi *vsi = pf->eswitch.control_vsi;
	int q_id;

	ice_for_each_txq(vsi, q_id) {

@@ -189,7 +189,7 @@ static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
/**
 * ice_eswitch_release_reprs - clear PR VSIs configuration
 * @pf: pointer to PF struct
- * @ctrl_vsi: pointer to switchdev control VSI
+ * @ctrl_vsi: pointer to eswitch control VSI
 */
static void
ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)

@@ -223,7 +223,7 @@ ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
 */
static int ice_eswitch_setup_reprs(struct ice_pf *pf)
{
-	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+	struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
	int max_vsi_num = 0;
	struct ice_vf *vf;
	unsigned int bkt;

@@ -359,7 +359,7 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
}

/**
- * ice_eswitch_set_target_vsi - set switchdev context in Tx context descriptor
+ * ice_eswitch_set_target_vsi - set eswitch context in Tx context descriptor
 * @skb: pointer to send buffer
 * @off: pointer to offload struct
 */

@@ -382,7 +382,7 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb,
}

/**
- * ice_eswitch_release_env - clear switchdev HW filters
+ * ice_eswitch_release_env - clear eswitch HW filters
 * @pf: pointer to PF struct
 *
 * This function removes HW filters configuration specific for switchdev

@@ -390,8 +390,8 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb,
 */
static void ice_eswitch_release_env(struct ice_pf *pf)
{
-	struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
-	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+	struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
+	struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
	struct ice_vsi_vlan_ops *vlan_ops;

	vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);

@@ -407,7 +407,7 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
}

/**
- * ice_eswitch_vsi_setup - configure switchdev control VSI
+ * ice_eswitch_vsi_setup - configure eswitch control VSI
 * @pf: pointer to PF structure
 * @pi: pointer to port_info structure
 */

@@ -486,12 +486,12 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
		return -EINVAL;
	}

-	pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
-	if (!pf->switchdev.control_vsi)
+	pf->eswitch.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
+	if (!pf->eswitch.control_vsi)
		return -ENODEV;

-	ctrl_vsi = pf->switchdev.control_vsi;
-	pf->switchdev.uplink_vsi = uplink_vsi;
+	ctrl_vsi = pf->eswitch.control_vsi;
+	pf->eswitch.uplink_vsi = uplink_vsi;

	if (ice_eswitch_setup_env(pf))
		goto err_vsi;

@@ -526,12 +526,12 @@ err_vsi:
}

/**
- * ice_eswitch_disable_switchdev - disable switchdev resources
+ * ice_eswitch_disable_switchdev - disable eswitch resources
 * @pf: pointer to PF structure
 */
static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
{
-	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+	struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;

	ice_eswitch_napi_disable(pf);
	ice_eswitch_br_offloads_deinit(pf);

@@ -625,7 +625,7 @@ void ice_eswitch_release(struct ice_pf *pf)
		return;

	ice_eswitch_disable_switchdev(pf);
-	pf->switchdev.is_running = false;
+	pf->eswitch.is_running = false;
}

/**

@@ -636,14 +636,15 @@ int ice_eswitch_configure(struct ice_pf *pf)
{
	int status;

-	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY || pf->switchdev.is_running)
+	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY ||
+	    pf->eswitch.is_running)
		return 0;

	status = ice_eswitch_enable_switchdev(pf);
	if (status)
		return status;

-	pf->switchdev.is_running = true;
+	pf->eswitch.is_running = true;
	return 0;
}

@@ -693,7 +694,7 @@ void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
 */
int ice_eswitch_rebuild(struct ice_pf *pf)
{
-	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+	struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
	int status;

	ice_eswitch_napi_disable(pf);
@@ -582,10 +582,13 @@ ice_eswitch_br_switchdev_event(struct notifier_block *nb,
	return NOTIFY_DONE;
}

-static void ice_eswitch_br_fdb_flush(struct ice_esw_br *bridge)
+void ice_eswitch_br_fdb_flush(struct ice_esw_br *bridge)
{
	struct ice_esw_br_fdb_entry *entry, *tmp;

+	if (!bridge)
+		return;
+
	list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
		ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry);
}

@@ -947,7 +950,7 @@ ice_eswitch_br_vf_repr_port_init(struct ice_esw_br *bridge,
static int
ice_eswitch_br_uplink_port_init(struct ice_esw_br *bridge, struct ice_pf *pf)
{
-	struct ice_vsi *vsi = pf->switchdev.uplink_vsi;
+	struct ice_vsi *vsi = pf->eswitch.uplink_vsi;
	struct ice_esw_br_port *br_port;
	int err;

@@ -1185,7 +1188,7 @@ ice_eswitch_br_port_event(struct notifier_block *nb,
static void
ice_eswitch_br_offloads_dealloc(struct ice_pf *pf)
{
-	struct ice_esw_br_offloads *br_offloads = pf->switchdev.br_offloads;
+	struct ice_esw_br_offloads *br_offloads = pf->eswitch.br_offloads;

	ASSERT_RTNL();

@@ -1194,7 +1197,7 @@ ice_eswitch_br_offloads_dealloc(struct ice_pf *pf)

	ice_eswitch_br_deinit(br_offloads, br_offloads->bridge);

-	pf->switchdev.br_offloads = NULL;
+	pf->eswitch.br_offloads = NULL;
	kfree(br_offloads);
}

@@ -1205,14 +1208,14 @@ ice_eswitch_br_offloads_alloc(struct ice_pf *pf)

	ASSERT_RTNL();

-	if (pf->switchdev.br_offloads)
+	if (pf->eswitch.br_offloads)
		return ERR_PTR(-EEXIST);

	br_offloads = kzalloc(sizeof(*br_offloads), GFP_KERNEL);
	if (!br_offloads)
		return ERR_PTR(-ENOMEM);

-	pf->switchdev.br_offloads = br_offloads;
+	pf->eswitch.br_offloads = br_offloads;
	br_offloads->pf = pf;

	return br_offloads;

@@ -1223,7 +1226,7 @@ ice_eswitch_br_offloads_deinit(struct ice_pf *pf)
{
	struct ice_esw_br_offloads *br_offloads;

-	br_offloads = pf->switchdev.br_offloads;
+	br_offloads = pf->eswitch.br_offloads;
	if (!br_offloads)
		return;
@@ -116,5 +116,6 @@ void
 ice_eswitch_br_offloads_deinit(struct ice_pf *pf);
 int
 ice_eswitch_br_offloads_init(struct ice_pf *pf);
+void ice_eswitch_br_fdb_flush(struct ice_esw_br *bridge);
 
 #endif /* _ICE_ESWITCH_BR_H_ */
@@ -84,7 +84,8 @@ ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
 
 bool netif_is_ice(const struct net_device *dev)
 {
-	return dev && (dev->netdev_ops == &ice_netdev_ops);
+	return dev && (dev->netdev_ops == &ice_netdev_ops ||
+		       dev->netdev_ops == &ice_netdev_safe_mode_ops);
 }
 
 /**
@@ -517,25 +518,6 @@ static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
 		pf->vf_agg_node[node].num_vsis = 0;
 }
 
-/**
- * ice_clear_sw_switch_recipes - clear switch recipes
- * @pf: board private structure
- *
- * Mark switch recipes as not created in sw structures. There are cases where
- * rules (especially advanced rules) need to be restored, either re-read from
- * hardware or added again. For example after the reset. 'recp_created' flag
- * prevents from doing that and need to be cleared upfront.
- */
-static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
-{
-	struct ice_sw_recipe *recp;
-	u8 i;
-
-	recp = pf->hw.switch_info->recp_list;
-	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
-		recp[i].recp_created = false;
-}
-
 /**
  * ice_prepare_for_reset - prep for reset
  * @pf: board private structure
@@ -572,8 +554,9 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
 	mutex_unlock(&pf->vfs.table_lock);
 
 	if (ice_is_eswitch_mode_switchdev(pf)) {
-		if (reset_type != ICE_RESET_PFR)
-			ice_clear_sw_switch_recipes(pf);
+		rtnl_lock();
+		ice_eswitch_br_fdb_flush(pf->eswitch.br_offloads->bridge);
+		rtnl_unlock();
 	}
 
 	/* release ADQ specific HW and SW resources */
@@ -6326,8 +6326,6 @@ ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
 		if (!itr->vsi_list_info ||
 		    !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
 			continue;
-		/* Clearing it so that the logic can add it back */
-		clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
 		f_entry.fltr_info.vsi_handle = vsi_handle;
 		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
 		/* update the src in case it is VSI num */
@@ -660,7 +660,7 @@ static int ice_tc_setup_redirect_action(struct net_device *filter_dev,
 	    ice_tc_is_dev_uplink(target_dev)) {
 		repr = ice_netdev_to_repr(filter_dev);
 
-		fltr->dest_vsi = repr->src_vsi->back->switchdev.uplink_vsi;
+		fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi;
 		fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
 	} else if (ice_tc_is_dev_uplink(filter_dev) &&
 		   ice_is_port_repr_netdev(target_dev)) {
@@ -772,11 +772,22 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
 		rule_info.sw_act.src = hw->pf_id;
 		rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
 	} else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
-		   fltr->dest_vsi == vsi->back->switchdev.uplink_vsi) {
+		   fltr->dest_vsi == vsi->back->eswitch.uplink_vsi) {
 		/* VF to Uplink */
 		rule_info.sw_act.flag |= ICE_FLTR_TX;
 		rule_info.sw_act.src = vsi->idx;
 		rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
+		/* This is a specific case. The destination VSI index is
+		 * overwritten by the source VSI index. This type of filter
+		 * should allow the packet to go to the LAN, not to the
+		 * VSI passed here. It should set LAN_EN bit only. However,
+		 * the VSI must be a valid one. Setting source VSI index
+		 * here is safe. Even if the result from switch is set LAN_EN
+		 * and LB_EN (which normally will pass the packet to this VSI)
+		 * packet won't be seen on the VSI, because local loopback is
+		 * turned off.
+		 */
+		rule_info.sw_act.vsi_handle = vsi->idx;
 	} else {
 		/* VF to VF */
 		rule_info.sw_act.flag |= ICE_FLTR_TX;
@@ -9672,6 +9672,10 @@ static void igb_io_resume(struct pci_dev *pdev)
 	struct igb_adapter *adapter = netdev_priv(netdev);
 
 	if (netif_running(netdev)) {
+		if (!test_bit(__IGB_DOWN, &adapter->state)) {
+			dev_dbg(&pdev->dev, "Resuming from non-fatal error, do nothing.\n");
+			return;
+		}
 		if (igb_up(adapter)) {
 			dev_err(&pdev->dev, "igb_up failed after reset\n");
 			return;
@@ -2036,7 +2036,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
 	rx_q->queue_index = queue;
 	rx_q->priv_data = priv;
 
-	pp_params.flags = PP_FLAG_DMA_MAP | (xdp_prog ? PP_FLAG_DMA_SYNC_DEV : 0);
+	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
 	pp_params.pool_size = dma_conf->dma_rx_size;
 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
 	pp_params.order = ilog2(num_pages);
@@ -120,7 +120,7 @@ static int bcm84881_aneg_done(struct phy_device *phydev)
 
 	bmsr = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_C22 + MII_BMSR);
 	if (bmsr < 0)
-		return val;
+		return bmsr;
 
 	return !!(val & MDIO_AN_STAT1_COMPLETE) &&
 	       !!(bmsr & BMSR_ANEGCOMPLETE);
@@ -146,7 +146,7 @@ static int bcm84881_read_status(struct phy_device *phydev)
 
 	bmsr = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_C22 + MII_BMSR);
 	if (bmsr < 0)
-		return val;
+		return bmsr;
 
 	phydev->autoneg_complete = !!(val & MDIO_AN_STAT1_COMPLETE) &&
 				   !!(bmsr & BMSR_ANEGCOMPLETE);
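Editor's note: both bcm84881 hunks fix the same bug class — after checking that the second register read failed (bmsr < 0), the function returned the unrelated earlier value val instead of the failing read's error code. A minimal, self-contained C sketch of the corrected error-propagation pattern; read_reg() is a hypothetical stand-in for phy_read_mmd():

#include <stdio.h>

/* Hypothetical register read: a negative return means error. */
static int read_reg(int reg)
{
	return (reg == 0x1) ? -5 : 0x20; /* pretend reg 0x1 is broken */
}

static int aneg_done(void)
{
	int val, bmsr;

	val = read_reg(0x0);
	if (val < 0)
		return val;   /* propagate this read's error */

	bmsr = read_reg(0x1);
	if (bmsr < 0)
		return bmsr;  /* the fix: return bmsr, not the stale val */

	return (val & 0x20) && (bmsr & 0x20);
}

int main(void)
{
	printf("aneg_done() = %d\n", aneg_done()); /* prints -5 here */
	return 0;
}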
@@ -645,7 +645,6 @@ static int dp83869_configure_fiber(struct phy_device *phydev,
 			 phydev->supported);
 
 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported);
-	linkmode_set_bit(ADVERTISED_FIBRE, phydev->advertising);
 
 	if (dp83869->mode == DP83869_RGMII_1000_BASE) {
 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
@@ -3082,10 +3082,11 @@ static __maybe_unused int phy_led_hw_is_supported(struct led_classdev *led_cdev,
 
 static void phy_leds_unregister(struct phy_device *phydev)
 {
-	struct phy_led *phyled;
+	struct phy_led *phyled, *tmp;
 
-	list_for_each_entry(phyled, &phydev->leds, list) {
+	list_for_each_entry_safe(phyled, tmp, &phydev->leds, list) {
 		led_classdev_unregister(&phyled->led_cdev);
+		list_del(&phyled->list);
 	}
 }
 
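Editor's note: phy_leds_unregister() switches to list_for_each_entry_safe() because the loop now unlinks each node (list_del()) as it goes; the plain iterator would dereference an entry it has just removed. A standalone C sketch of the same discipline on a hand-rolled list (not the kernel's list.h, purely illustrative):

#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

int main(void)
{
	struct node *head = NULL, *n, *tmp;
	int i;

	/* Build a small list. */
	for (i = 0; i < 3; i++) {
		n = malloc(sizeof(*n));
		n->val = i;
		n->next = head;
		head = n;
	}

	/* Safe traversal: cache ->next before freeing the current node,
	 * which is exactly why list_for_each_entry_safe() carries a
	 * second 'tmp' cursor. */
	for (n = head; n; n = tmp) {
		tmp = n->next;
		printf("removing %d\n", n->val);
		free(n);
	}
	return 0;
}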
@@ -541,7 +541,7 @@ ppp_async_encode(struct asyncppp *ap)
 	 * and 7 (code-reject) must be sent as though no options
 	 * had been negotiated.
 	 */
-	islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7;
+	islcp = proto == PPP_LCP && count >= 3 && 1 <= data[2] && data[2] <= 7;
 
 	if (i == 0) {
 		if (islcp)
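Editor's note: the ppp_async_encode() fix adds count >= 3 before indexing data[2] — in a short fragment the LCP code byte may simply not be there. A tiny compilable sketch of the guard (hypothetical buffer, same idea):

#include <stdio.h>

#define PPP_LCP 0xc021

/* True only when the LCP code byte both exists and is 1..7. */
static int is_lcp_ctrl(int proto, const unsigned char *data, int count)
{
	return proto == PPP_LCP && count >= 3 && 1 <= data[2] && data[2] <= 7;
}

int main(void)
{
	unsigned char short_pkt[2] = { 0xc0, 0x21 };

	/* Without the count check this would read past the 2-byte buffer. */
	printf("%d\n", is_lcp_ctrl(PPP_LCP, short_pkt, (int)sizeof(short_pkt)));
	return 0;
}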
|
|
@ -643,46 +643,57 @@ bad:
|
|||
int
|
||||
slhc_remember(struct slcompress *comp, unsigned char *icp, int isize)
|
||||
{
|
||||
struct cstate *cs;
|
||||
unsigned ihl;
|
||||
|
||||
const struct tcphdr *th;
|
||||
unsigned char index;
|
||||
struct iphdr *iph;
|
||||
struct cstate *cs;
|
||||
unsigned int ihl;
|
||||
|
||||
if(isize < 20) {
|
||||
/* The packet is shorter than a legal IP header */
|
||||
/* The packet is shorter than a legal IP header.
|
||||
* Also make sure isize is positive.
|
||||
*/
|
||||
if (isize < (int)sizeof(struct iphdr)) {
|
||||
runt:
|
||||
comp->sls_i_runt++;
|
||||
return slhc_toss( comp );
|
||||
return slhc_toss(comp);
|
||||
}
|
||||
iph = (struct iphdr *)icp;
|
||||
/* Peek at the IP header's IHL field to find its length */
|
||||
ihl = icp[0] & 0xf;
|
||||
if(ihl < 20 / 4){
|
||||
/* The IP header length field is too small */
|
||||
comp->sls_i_runt++;
|
||||
return slhc_toss( comp );
|
||||
}
|
||||
index = icp[9];
|
||||
icp[9] = IPPROTO_TCP;
|
||||
ihl = iph->ihl;
|
||||
/* The IP header length field is too small,
|
||||
* or packet is shorter than the IP header followed
|
||||
* by minimal tcp header.
|
||||
*/
|
||||
if (ihl < 5 || isize < ihl * 4 + sizeof(struct tcphdr))
|
||||
goto runt;
|
||||
|
||||
index = iph->protocol;
|
||||
iph->protocol = IPPROTO_TCP;
|
||||
|
||||
if (ip_fast_csum(icp, ihl)) {
|
||||
/* Bad IP header checksum; discard */
|
||||
comp->sls_i_badcheck++;
|
||||
return slhc_toss( comp );
|
||||
return slhc_toss(comp);
|
||||
}
|
||||
if(index > comp->rslot_limit) {
|
||||
if (index > comp->rslot_limit) {
|
||||
comp->sls_i_error++;
|
||||
return slhc_toss(comp);
|
||||
}
|
||||
|
||||
th = (struct tcphdr *)(icp + ihl * 4);
|
||||
if (th->doff < sizeof(struct tcphdr) / 4)
|
||||
goto runt;
|
||||
if (isize < ihl * 4 + th->doff * 4)
|
||||
goto runt;
|
||||
/* Update local state */
|
||||
cs = &comp->rstate[comp->recv_current = index];
|
||||
comp->flags &=~ SLF_TOSS;
|
||||
memcpy(&cs->cs_ip,icp,20);
|
||||
memcpy(&cs->cs_tcp,icp + ihl*4,20);
|
||||
memcpy(&cs->cs_ip, iph, sizeof(*iph));
|
||||
memcpy(&cs->cs_tcp, th, sizeof(*th));
|
||||
if (ihl > 5)
|
||||
memcpy(cs->cs_ipopt, icp + sizeof(struct iphdr), (ihl - 5) * 4);
|
||||
if (cs->cs_tcp.doff > 5)
|
||||
memcpy(cs->cs_tcpopt, icp + ihl*4 + sizeof(struct tcphdr), (cs->cs_tcp.doff - 5) * 4);
|
||||
cs->cs_hsize = ihl*2 + cs->cs_tcp.doff*2;
|
||||
memcpy(cs->cs_ipopt, &iph[1], (ihl - 5) * 4);
|
||||
if (th->doff > 5)
|
||||
memcpy(cs->cs_tcpopt, &th[1], (th->doff - 5) * 4);
|
||||
cs->cs_hsize = ihl*2 + th->doff*2;
|
||||
cs->initialized = true;
|
||||
/* Put headers back on packet
|
||||
* Neither header checksum is recalculated
|
||||
|
|
|
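Editor's note: the rewritten slhc_remember() is essentially a bounds-checking ladder — no offset into icp[] is trusted until the length it depends on has been validated, in order: packet length covers an IP header, IHL is sane, the TCP data offset is sane, and the full header span fits. A self-contained C sketch of the same ladder over a raw buffer (simplified constants and nibble reads, not the kernel's structs):

#include <stdio.h>

#define IP_HDR_MIN  20	/* bytes: minimal IPv4 header */
#define TCP_HDR_MIN 20	/* bytes: minimal TCP header  */

/* Returns 0 when buf holds a complete IPv4 header plus TCP header,
 * mirroring the check order in the patched slhc_remember(). */
static int validate(const unsigned char *buf, int len)
{
	int ihl, doff;

	if (len < IP_HDR_MIN)			/* runt: shorter than IPv4 hdr */
		return -1;
	ihl = buf[0] & 0xf;			/* IHL, in 32-bit words */
	if (ihl < 5 || len < ihl * 4 + TCP_HDR_MIN)
		return -1;			/* bad IHL, or no room for TCP */
	doff = buf[ihl * 4 + 12] >> 4;		/* TCP data offset, in words */
	if (doff < 5 || len < ihl * 4 + doff * 4)
		return -1;			/* bad doff, or options overrun */
	return 0;				/* headers fully in-bounds */
}

int main(void)
{
	unsigned char pkt[40] = { 0x45 };	/* version 4, IHL 5; rest zero */

	pkt[20 + 12] = 5 << 4;			/* TCP doff = 5 */
	printf("valid: %d\n", validate(pkt, (int)sizeof(pkt)) == 0);
	return 0;
}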
@@ -4819,9 +4819,13 @@ static int __init vxlan_init_module(void)
 	if (rc)
 		goto out4;
 
-	vxlan_vnifilter_init();
+	rc = vxlan_vnifilter_init();
+	if (rc)
+		goto out5;
 
 	return 0;
+out5:
+	rtnl_link_unregister(&vxlan_link_ops);
 out4:
 	unregister_switchdev_notifier(&vxlan_switchdev_notifier_block);
 out3:
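Editor's note: vxlan_init_module() now checks the vnifilter init and unwinds through a new out5 label. This is the standard init ladder: each successful step gets a matching label that undoes it, taken in reverse order on failure. A compilable miniature of the ladder; the step_*/undo_* functions are stand-ins, not the vxlan code:

#include <stdio.h>

static int  step_a(void) { puts("a up");    return 0; }
static void undo_a(void) { puts("a down"); }
static int  step_b(void) { puts("b up");    return 0; }
static void undo_b(void) { puts("b down"); }
static int  step_c(void) { puts("c fails"); return -1; }

static int init_module_sketch(void)
{
	int rc;

	rc = step_a();
	if (rc)
		goto out;
	rc = step_b();
	if (rc)
		goto undo_a;
	rc = step_c();		/* newly checked step, like vnifilter init */
	if (rc)
		goto undo_b;	/* the added 'out5'-style label */
	return 0;

undo_b:
	undo_b();		/* reverse order: last success undone first */
undo_a:
	undo_a();
out:
	return rc;
}

int main(void)
{
	return init_module_sketch() ? 1 : 0;
}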
@@ -202,7 +202,7 @@ int vxlan_vni_in_use(struct net *src_net, struct vxlan_dev *vxlan,
 int vxlan_vnigroup_init(struct vxlan_dev *vxlan);
 void vxlan_vnigroup_uninit(struct vxlan_dev *vxlan);
 
-void vxlan_vnifilter_init(void);
+int vxlan_vnifilter_init(void);
 void vxlan_vnifilter_uninit(void);
 void vxlan_vnifilter_count(struct vxlan_dev *vxlan, __be32 vni,
 			   struct vxlan_vni_node *vninode,
@@ -992,19 +992,18 @@ static int vxlan_vnifilter_process(struct sk_buff *skb, struct nlmsghdr *nlh,
 	return err;
 }
 
-void vxlan_vnifilter_init(void)
+static const struct rtnl_msg_handler vxlan_vnifilter_rtnl_msg_handlers[] = {
+	{THIS_MODULE, PF_BRIDGE, RTM_GETTUNNEL, NULL, vxlan_vnifilter_dump, 0},
+	{THIS_MODULE, PF_BRIDGE, RTM_NEWTUNNEL, vxlan_vnifilter_process, NULL, 0},
+	{THIS_MODULE, PF_BRIDGE, RTM_DELTUNNEL, vxlan_vnifilter_process, NULL, 0},
+};
+
+int vxlan_vnifilter_init(void)
 {
-	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETTUNNEL, NULL,
-			     vxlan_vnifilter_dump, 0);
-	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWTUNNEL,
-			     vxlan_vnifilter_process, NULL, 0);
-	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELTUNNEL,
-			     vxlan_vnifilter_process, NULL, 0);
+	return rtnl_register_many(vxlan_vnifilter_rtnl_msg_handlers);
 }
 
 void vxlan_vnifilter_uninit(void)
 {
-	rtnl_unregister(PF_BRIDGE, RTM_GETTUNNEL);
-	rtnl_unregister(PF_BRIDGE, RTM_NEWTUNNEL);
-	rtnl_unregister(PF_BRIDGE, RTM_DELTUNNEL);
+	rtnl_unregister_many(vxlan_vnifilter_rtnl_msg_handlers);
 }
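Editor's note: the vnifilter conversion replaces three paired register/unregister calls with a single handler table passed to rtnl_register_many()/rtnl_unregister_many(), making registration all-or-nothing and keeping init and teardown from drifting apart. A standalone C sketch of the table-driven idea with unwind-on-failure; the toy register/unregister functions are stand-ins, not the rtnetlink API:

#include <stdio.h>

struct handler {
	int msgtype;
	int (*reg)(int);
	void (*unreg)(int);
};

static int  reg_ok(int t) { printf("reg %d\n", t); return t != 3 ? 0 : -1; }
static void unreg(int t)  { printf("unreg %d\n", t); }

/* Register every entry; on failure, unwind the ones already done. */
static int register_many(const struct handler *h, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (h[i].reg(h[i].msgtype)) {
			while (--i >= 0)
				h[i].unreg(h[i].msgtype);
			return -1;
		}
	}
	return 0;
}

static const struct handler handlers[] = {
	{ 1, reg_ok, unreg },
	{ 2, reg_ok, unreg },
	{ 3, reg_ok, unreg },	/* this one fails, triggering the unwind */
};

int main(void)
{
	int n = (int)(sizeof(handlers) / sizeof(handlers[0]));

	return register_many(handlers, n) ? 1 : 0;
}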
@@ -1554,6 +1554,7 @@ static void switchtec_ntb_remove(struct device *dev)
 	switchtec_ntb_deinit_db_msg_irq(sndev);
 	switchtec_ntb_deinit_shared_mw(sndev);
 	switchtec_ntb_deinit_crosslink(sndev);
+	cancel_work_sync(&sndev->check_link_status_work);
 	kfree(sndev);
 	dev_info(dev, "ntb device unregistered\n");
 }
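Editor's note: the switchtec change is a use-after-free guard — cancel_work_sync() guarantees the deferred work item is not running, and will not run, before sndev is freed. The same discipline expressed in plain pthreads: join the worker before freeing the state it touches (an illustrative stand-in, not the workqueue API):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct dev_state {
	int link_up;
};

static void *check_link(void *arg)
{
	struct dev_state *st = arg;

	st->link_up = 1;	/* touches state that remove() will free */
	return NULL;
}

int main(void)
{
	struct dev_state *st = calloc(1, sizeof(*st));
	pthread_t worker;

	pthread_create(&worker, NULL, check_link, st);

	/* Like cancel_work_sync(): make sure the worker has finished
	 * before the state is freed, or check_link() could run on
	 * freed memory. */
	pthread_join(worker, NULL);
	free(st);

	puts("removed safely");
	return 0;
}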
@@ -44,6 +44,15 @@ static int virtio_pmem_flush(struct nd_region *nd_region)
 	unsigned long flags;
 	int err, err1;
 
+	/*
+	 * Don't bother to submit the request to the device if the device is
+	 * not activated.
+	 */
+	if (vdev->config->get_status(vdev) & VIRTIO_CONFIG_S_NEEDS_RESET) {
+		dev_info(&vdev->dev, "virtio pmem device needs a reset\n");
+		return -EIO;
+	}
+
 	might_sleep();
 	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
 	if (!req_data)