Merge linux 6.6.35

Conflicts:
	arch/x86/kvm/svm/sev.c
	arch/x86/kvm/svm/svm.h
	drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
	drivers/usb/host/xhci-pci.c

Signed-off-by: Jianping Liu <frankjpliu@tencent.com>
Author: Jianping Liu <frankjpliu@tencent.com>
Date: 2024-06-25 15:00:53 +08:00
Commit: 7a24449c89
249 changed files with 3416 additions and 1951 deletions


@@ -8,7 +8,7 @@ else
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 6
SUBLEVEL = 34
SUBLEVEL = 35
EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth


@@ -31,18 +31,17 @@ void flush_cache_all_local(void);
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);
void flush_kernel_dcache_page_addr(const void *addr);
#define flush_kernel_dcache_range(start,size) \
flush_kernel_dcache_range_asm((start), (start)+(size));
/* The only way to flush a vmap range is to flush whole cache */
#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
void flush_kernel_vmap_range(void *vaddr, int size);
void invalidate_kernel_vmap_range(void *vaddr, int size);
#define flush_cache_vmap(start, end) flush_cache_all()
void flush_cache_vmap(unsigned long start, unsigned long end);
#define flush_cache_vmap_early(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) flush_cache_all()
void flush_cache_vunmap(unsigned long start, unsigned long end);
void flush_dcache_folio(struct folio *folio);
#define flush_dcache_folio flush_dcache_folio
@@ -77,17 +76,11 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
/* defined in pacache.S exported in cache.c used by flush_anon_page */
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
#define ARCH_HAS_FLUSH_ANON_PAGE
void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr);
#define ARCH_HAS_FLUSH_ON_KUNMAP
static inline void kunmap_flush_on_unmap(const void *addr)
{
flush_kernel_dcache_page_addr(addr);
}
void kunmap_flush_on_unmap(const void *addr);
#endif /* _PARISC_CACHEFLUSH_H */


@@ -448,14 +448,17 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
return pte;
}
static inline pte_t ptep_get(pte_t *ptep)
{
return READ_ONCE(*ptep);
}
#define ptep_get ptep_get
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
pte_t pte;
if (!pte_young(*ptep))
return 0;
pte = *ptep;
pte = ptep_get(ptep);
if (!pte_young(pte)) {
return 0;
}
@@ -463,17 +466,10 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
return 1;
}
int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep);
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep);
struct mm_struct;
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
pte_t old_pte;
old_pte = *ptep;
set_pte(ptep, __pte(0));
return old_pte;
}
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
set_pte(ptep, pte_wrprotect(*ptep));
@@ -511,7 +507,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
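
The move to ptep_get() above is the point of this hunk: a plain *ptep dereference lets the compiler re-read (and potentially tear) the load, so the pte_young() test and the later use of the PTE could observe two different values. A minimal sketch of the pattern; READ_ONCE() is simplified from include/asm-generic/rwonce.h and pte_probe_young() is a made-up illustration:

/* Force a single volatile load; the compiler may neither re-read
 * nor tear it. (Simplified; the real macro is more elaborate.) */
#define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

static inline int pte_probe_young(pte_t *ptep)
{
	pte_t pte = ptep_get(ptep);	/* one snapshot... */

	return pte_young(pte);		/* ...used for every check */
}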


@@ -20,6 +20,7 @@
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <linux/vmalloc.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
@@ -31,20 +32,31 @@
#include <asm/mmu_context.h>
#include <asm/cachectl.h>
#define PTR_PAGE_ALIGN_DOWN(addr) PTR_ALIGN_DOWN(addr, PAGE_SIZE)
/*
* When nonzero, use _PAGE_ACCESSED bit to try to reduce the number
* of page flushes done by flush_cache_page_if_present. There are some
* pros and cons in using this option. It may increase the risk of
* random segmentation faults.
*/
#define CONFIG_FLUSH_PAGE_ACCESSED 0
int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);
/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_data_cache_local(void *); /* flushes local data-cache only */
void flush_instruction_cache_local(void); /* flushes local code-cache only */
static void flush_kernel_dcache_page_addr(const void *addr);
/* On some machines (i.e., ones with the Merced bus), there can be
* only a single PxTLB broadcast at a time; this must be guaranteed
* by software. We need a spinlock around all TLB flushes to ensure
@@ -317,6 +329,18 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
{
if (!static_branch_likely(&parisc_has_cache))
return;
/*
* The TLB is the engine of coherence on parisc. The CPU is
* entitled to speculate any page with a TLB mapping, so here
* we kill the mapping then flush the page along a special flush
* only alias mapping. This guarantees that the page is no longer
* in the cache for any process, nor may it be speculatively
* read in (until the user or kernel specifically accesses it,
* of course).
*/
flush_tlb_page(vma, vmaddr);
preempt_disable();
flush_dcache_page_asm(physaddr, vmaddr);
if (vma->vm_flags & VM_EXEC)
@@ -324,46 +348,44 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
preempt_enable();
}
static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
static void flush_kernel_dcache_page_addr(const void *addr)
{
unsigned long flags, space, pgd, prot;
#ifdef CONFIG_TLB_PTLOCK
unsigned long pgd_lock;
#endif
unsigned long vaddr = (unsigned long)addr;
unsigned long flags;
vmaddr &= PAGE_MASK;
/* Purge TLB entry to remove translation on all CPUs */
purge_tlb_start(flags);
pdtlb(SR_KERNEL, addr);
purge_tlb_end(flags);
/* Use tmpalias flush to prevent data cache move-in */
preempt_disable();
/* Set context for flush */
local_irq_save(flags);
prot = mfctl(8);
space = mfsp(SR_USER);
pgd = mfctl(25);
#ifdef CONFIG_TLB_PTLOCK
pgd_lock = mfctl(28);
#endif
switch_mm_irqs_off(NULL, vma->vm_mm, NULL);
local_irq_restore(flags);
flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
if (vma->vm_flags & VM_EXEC)
flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
flush_tlb_page(vma, vmaddr);
/* Restore previous context */
local_irq_save(flags);
#ifdef CONFIG_TLB_PTLOCK
mtctl(pgd_lock, 28);
#endif
mtctl(pgd, 25);
mtsp(space, SR_USER);
mtctl(prot, 8);
local_irq_restore(flags);
flush_dcache_page_asm(__pa(vaddr), vaddr);
preempt_enable();
}
static void flush_kernel_icache_page_addr(const void *addr)
{
unsigned long vaddr = (unsigned long)addr;
unsigned long flags;
/* Purge TLB entry to remove translation on all CPUs */
purge_tlb_start(flags);
pdtlb(SR_KERNEL, addr);
purge_tlb_end(flags);
/* Use tmpalias flush to prevent instruction cache move-in */
preempt_disable();
flush_icache_page_asm(__pa(vaddr), vaddr);
preempt_enable();
}
void kunmap_flush_on_unmap(const void *addr)
{
flush_kernel_dcache_page_addr(addr);
}
EXPORT_SYMBOL(kunmap_flush_on_unmap);
void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
unsigned int nr)
{
@@ -371,13 +393,16 @@ void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
for (;;) {
flush_kernel_dcache_page_addr(kaddr);
flush_kernel_icache_page(kaddr);
flush_kernel_icache_page_addr(kaddr);
if (--nr == 0)
break;
kaddr += PAGE_SIZE;
}
}
/*
* Walk page directory for MM to find PTEP pointer for address ADDR.
*/
static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
{
pte_t *ptep = NULL;
@@ -406,6 +431,41 @@ static inline bool pte_needs_flush(pte_t pte)
== (_PAGE_PRESENT | _PAGE_ACCESSED);
}
/*
* Return user physical address. Returns 0 if page is not present.
*/
static inline unsigned long get_upa(struct mm_struct *mm, unsigned long addr)
{
unsigned long flags, space, pgd, prot, pa;
#ifdef CONFIG_TLB_PTLOCK
unsigned long pgd_lock;
#endif
/* Save context */
local_irq_save(flags);
prot = mfctl(8);
space = mfsp(SR_USER);
pgd = mfctl(25);
#ifdef CONFIG_TLB_PTLOCK
pgd_lock = mfctl(28);
#endif
/* Set context for lpa_user */
switch_mm_irqs_off(NULL, mm, NULL);
pa = lpa_user(addr);
/* Restore previous context */
#ifdef CONFIG_TLB_PTLOCK
mtctl(pgd_lock, 28);
#endif
mtctl(pgd, 25);
mtsp(space, SR_USER);
mtctl(prot, 8);
local_irq_restore(flags);
return pa;
}
void flush_dcache_folio(struct folio *folio)
{
struct address_space *mapping = folio_flush_mapping(folio);
@@ -454,50 +514,23 @@ void flush_dcache_folio(struct folio *folio)
if (addr + nr * PAGE_SIZE > vma->vm_end)
nr = (vma->vm_end - addr) / PAGE_SIZE;
if (parisc_requires_coherency()) {
for (i = 0; i < nr; i++) {
pte_t *ptep = get_ptep(vma->vm_mm,
addr + i * PAGE_SIZE);
if (!ptep)
continue;
if (pte_needs_flush(*ptep))
flush_user_cache_page(vma,
addr + i * PAGE_SIZE);
/* Optimise accesses to the same table? */
pte_unmap(ptep);
}
} else {
/*
* The TLB is the engine of coherence on parisc:
* The CPU is entitled to speculate any page
* with a TLB mapping, so here we kill the
* mapping then flush the page along a special
* flush only alias mapping. This guarantees that
* the page is no-longer in the cache for any
* process and nor may it be speculatively read
* in (until the user or kernel specifically
* accesses it, of course)
*/
for (i = 0; i < nr; i++)
flush_tlb_page(vma, addr + i * PAGE_SIZE);
if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
!= (addr & (SHM_COLOUR - 1))) {
for (i = 0; i < nr; i++)
__flush_cache_page(vma,
addr + i * PAGE_SIZE,
(pfn + i) * PAGE_SIZE);
/*
* Software is allowed to have any number
* of private mappings to a page.
*/
if (!(vma->vm_flags & VM_SHARED))
continue;
if (old_addr)
pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
old_addr, addr, vma->vm_file);
if (nr == folio_nr_pages(folio))
old_addr = addr;
}
for (i = 0; i < nr; i++)
__flush_cache_page(vma,
addr + i * PAGE_SIZE,
(pfn + i) * PAGE_SIZE);
/*
* Software is allowed to have any number
* of private mappings to a page.
*/
if (!(vma->vm_flags & VM_SHARED))
continue;
if (old_addr)
pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
old_addr, addr, vma->vm_file);
if (nr == folio_nr_pages(folio))
old_addr = addr;
}
WARN_ON(++count == 4096);
}
@@ -587,35 +620,28 @@ extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);
void flush_kernel_dcache_page_addr(const void *addr)
{
unsigned long flags;
flush_kernel_dcache_page_asm(addr);
purge_tlb_start(flags);
pdtlb(SR_KERNEL, addr);
purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
static void flush_cache_page_if_present(struct vm_area_struct *vma,
unsigned long vmaddr, unsigned long pfn)
unsigned long vmaddr)
{
#if CONFIG_FLUSH_PAGE_ACCESSED
bool needs_flush = false;
pte_t *ptep;
pte_t *ptep, pte;
/*
* The pte check is racy and sometimes the flush will trigger
* a non-access TLB miss. Hopefully, the page has already been
* flushed.
*/
ptep = get_ptep(vma->vm_mm, vmaddr);
if (ptep) {
needs_flush = pte_needs_flush(*ptep);
pte = ptep_get(ptep);
needs_flush = pte_needs_flush(pte);
pte_unmap(ptep);
}
if (needs_flush)
flush_cache_page(vma, vmaddr, pfn);
__flush_cache_page(vma, vmaddr, PFN_PHYS(pte_pfn(pte)));
#else
struct mm_struct *mm = vma->vm_mm;
unsigned long physaddr = get_upa(mm, vmaddr);
if (physaddr)
__flush_cache_page(vma, vmaddr, PAGE_ALIGN_DOWN(physaddr));
#endif
}
void copy_user_highpage(struct page *to, struct page *from,
@@ -625,7 +651,7 @@ void copy_user_highpage(struct page *to, struct page *from,
kfrom = kmap_local_page(from);
kto = kmap_local_page(to);
flush_cache_page_if_present(vma, vaddr, page_to_pfn(from));
__flush_cache_page(vma, vaddr, PFN_PHYS(page_to_pfn(from)));
copy_page_asm(kto, kfrom);
kunmap_local(kto);
kunmap_local(kfrom);
@@ -634,16 +660,17 @@ void copy_user_highpage(struct page *to, struct page *from,
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long user_vaddr, void *dst, void *src, int len)
{
flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
__flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
memcpy(dst, src, len);
flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len);
flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(dst));
}
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long user_vaddr, void *dst, void *src, int len)
{
flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
__flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
memcpy(dst, src, len);
flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(src));
}
/* __flush_tlb_range()
@@ -677,32 +704,10 @@ int __flush_tlb_range(unsigned long sid, unsigned long start,
static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
unsigned long addr, pfn;
pte_t *ptep;
unsigned long addr;
for (addr = start; addr < end; addr += PAGE_SIZE) {
bool needs_flush = false;
/*
* The vma can contain pages that aren't present. Although
* the pte search is expensive, we need the pte to find the
* page pfn and to check whether the page should be flushed.
*/
ptep = get_ptep(vma->vm_mm, addr);
if (ptep) {
needs_flush = pte_needs_flush(*ptep);
pfn = pte_pfn(*ptep);
pte_unmap(ptep);
}
if (needs_flush) {
if (parisc_requires_coherency()) {
flush_user_cache_page(vma, addr);
} else {
if (WARN_ON(!pfn_valid(pfn)))
return;
__flush_cache_page(vma, addr, PFN_PHYS(pfn));
}
}
}
for (addr = start; addr < end; addr += PAGE_SIZE)
flush_cache_page_if_present(vma, addr);
}
static inline unsigned long mm_total_size(struct mm_struct *mm)
@@ -753,21 +758,19 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
return;
flush_tlb_range(vma, start, end);
flush_cache_all();
if (vma->vm_flags & VM_EXEC)
flush_cache_all();
else
flush_data_cache();
return;
}
flush_cache_pages(vma, start, end);
flush_cache_pages(vma, start & PAGE_MASK, end);
}
void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
if (WARN_ON(!pfn_valid(pfn)))
return;
if (parisc_requires_coherency())
flush_user_cache_page(vma, vmaddr);
else
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}
void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
@@ -775,34 +778,133 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned lon
if (!PageAnon(page))
return;
if (parisc_requires_coherency()) {
if (vma->vm_flags & VM_SHARED)
flush_data_cache();
else
flush_user_cache_page(vma, vmaddr);
__flush_cache_page(vma, vmaddr, PFN_PHYS(page_to_pfn(page)));
}
int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr,
pte_t *ptep)
{
pte_t pte = ptep_get(ptep);
if (!pte_young(pte))
return 0;
set_pte(ptep, pte_mkold(pte));
#if CONFIG_FLUSH_PAGE_ACCESSED
__flush_cache_page(vma, addr, PFN_PHYS(pte_pfn(pte)));
#endif
return 1;
}
/*
* After a PTE is cleared, we have no way to flush the cache for
* the physical page. On PA8800 and PA8900 processors, these lines
* can cause random cache corruption. Thus, we must flush the cache
* as well as the TLB when clearing a PTE that's valid.
*/
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr,
pte_t *ptep)
{
struct mm_struct *mm = (vma)->vm_mm;
pte_t pte = ptep_get_and_clear(mm, addr, ptep);
unsigned long pfn = pte_pfn(pte);
if (pfn_valid(pfn))
__flush_cache_page(vma, addr, PFN_PHYS(pfn));
else if (pte_accessible(mm, pte))
flush_tlb_page(vma, addr);
return pte;
}
/*
* The physical address for pages in the ioremap case can be obtained
* from the vm_struct struct. I wasn't able to successfully handle the
* vmalloc and vmap cases. We have an array of struct page pointers in
* the uninitialized vmalloc case but the flush failed using page_to_pfn.
*/
void flush_cache_vmap(unsigned long start, unsigned long end)
{
unsigned long addr, physaddr;
struct vm_struct *vm;
/* Prevent cache move-in */
flush_tlb_kernel_range(start, end);
if (end - start >= parisc_cache_flush_threshold) {
flush_cache_all();
return;
}
flush_tlb_page(vma, vmaddr);
preempt_disable();
flush_dcache_page_asm(page_to_phys(page), vmaddr);
preempt_enable();
}
if (WARN_ON_ONCE(!is_vmalloc_addr((void *)start))) {
flush_cache_all();
return;
}
vm = find_vm_area((void *)start);
if (WARN_ON_ONCE(!vm)) {
flush_cache_all();
return;
}
/* The physical addresses of IOREMAP regions are contiguous */
if (vm->flags & VM_IOREMAP) {
physaddr = vm->phys_addr;
for (addr = start; addr < end; addr += PAGE_SIZE) {
preempt_disable();
flush_dcache_page_asm(physaddr, start);
flush_icache_page_asm(physaddr, start);
preempt_enable();
physaddr += PAGE_SIZE;
}
return;
}
flush_cache_all();
}
EXPORT_SYMBOL(flush_cache_vmap);
/*
* The vm_struct has been retired and the page table is set up. The
* last page in the range is a guard page. Its physical address can't
* be determined using lpa, so there is no way to flush the range
* using flush_dcache_page_asm.
*/
void flush_cache_vunmap(unsigned long start, unsigned long end)
{
/* Prevent cache move-in */
flush_tlb_kernel_range(start, end);
flush_data_cache();
}
EXPORT_SYMBOL(flush_cache_vunmap);
/*
* On systems with PA8800/PA8900 processors, there is no way to flush
* a vmap range other than using the architected loop to flush the
* entire cache. The page directory is not set up, so we can't use
* fdc, etc. FDCE/FICE don't work to flush a portion of the cache.
* L2 is physically indexed but FDCE/FICE instructions in virtual
* mode output their virtual address on the core bus, not their
* real address. As a result, the L2 cache index formed from the
* virtual address will most likely not be the same as the L2 index
* formed from the real address.
*/
void flush_kernel_vmap_range(void *vaddr, int size)
{
unsigned long start = (unsigned long)vaddr;
unsigned long end = start + size;
if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
(unsigned long)size >= parisc_cache_flush_threshold) {
flush_tlb_kernel_range(start, end);
flush_data_cache();
flush_tlb_kernel_range(start, end);
if (!static_branch_likely(&parisc_has_dcache))
return;
/* If interrupts are disabled, we can only do local flush */
if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
flush_data_cache_local(NULL);
return;
}
flush_kernel_dcache_range_asm(start, end);
flush_tlb_kernel_range(start, end);
flush_data_cache();
}
EXPORT_SYMBOL(flush_kernel_vmap_range);
@@ -814,15 +916,18 @@ void invalidate_kernel_vmap_range(void *vaddr, int size)
/* Ensure DMA is complete */
asm_syncdma();
if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
(unsigned long)size >= parisc_cache_flush_threshold) {
flush_tlb_kernel_range(start, end);
flush_data_cache();
flush_tlb_kernel_range(start, end);
if (!static_branch_likely(&parisc_has_dcache))
return;
/* If interrupts are disabled, we can only do local flush */
if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
flush_data_cache_local(NULL);
return;
}
purge_kernel_dcache_range_asm(start, end);
flush_tlb_kernel_range(start, end);
flush_data_cache();
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
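
Taken together, the vmap helpers now share one shape: kill the kernel TLB entries first so no CPU can speculatively pull lines back in, then do the cache operation (a write-back flush for flush_kernel_vmap_range(), a purge/discard for invalidate_kernel_vmap_range(), as the _asm helper names suggest). A hedged sketch of the DMA pattern the pair serves; start_dma() and wait_dma() are hypothetical placeholders:

	flush_kernel_vmap_range(buf, len);	/* write back dirty CPU lines
						   before the device reads */
	start_dma(buf, len);			/* hypothetical device I/O */
	wait_dma();
	invalidate_kernel_vmap_range(buf, len);	/* drop stale lines before the
						   CPU reads what DMA wrote */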


@@ -92,9 +92,25 @@ __pu_failed: \
: label)
#endif
#ifdef CONFIG_CC_IS_CLANG
#define DS_FORM_CONSTRAINT "Z<>"
#else
#define DS_FORM_CONSTRAINT "YZ<>"
#endif
#ifdef __powerpc64__
#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __put_user_asm2_goto(x, ptr, label) \
__put_user_asm_goto(x, ptr, label, "std")
#else
#define __put_user_asm2_goto(x, addr, label) \
asm goto ("1: std%U1%X1 %0,%1 # put_user\n" \
EX_TABLE(1b, %l2) \
: \
: "r" (x), DS_FORM_CONSTRAINT (*addr) \
: \
: label)
#endif // CONFIG_PPC_KERNEL_PREFIXED
#else /* __powerpc64__ */
#define __put_user_asm2_goto(x, addr, label) \
asm goto( \


@@ -237,10 +237,11 @@ static gpa_t aia_imsic_ppn(struct kvm_aia *aia, gpa_t addr)
static u32 aia_imsic_hart_index(struct kvm_aia *aia, gpa_t addr)
{
u32 hart, group = 0;
u32 hart = 0, group = 0;
hart = (addr >> (aia->nr_guest_bits + IMSIC_MMIO_PAGE_SHIFT)) &
GENMASK_ULL(aia->nr_hart_bits - 1, 0);
if (aia->nr_hart_bits)
hart = (addr >> (aia->nr_guest_bits + IMSIC_MMIO_PAGE_SHIFT)) &
GENMASK_ULL(aia->nr_hart_bits - 1, 0);
if (aia->nr_group_bits)
group = (addr >> aia->nr_group_shift) &
GENMASK_ULL(aia->nr_group_bits - 1, 0);
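
The hart = 0 preset and the nr_hart_bits guard exist because forming the mask for a zero-width field would evaluate GENMASK_ULL(-1, 0). A worked expansion of why that is unsafe, simplified from include/linux/bits.h:

/* GENMASK_ULL(h, l) is roughly:
 *	(~0ULL << (l)) & (~0ULL >> (64 - 1 - (h)))
 * With nr_hart_bits == 0, h == -1 and the right shift count becomes 64,
 * which is undefined behaviour in C, so the "mask" can be garbage rather
 * than the 0 a zero-width field needs. The nr_group_bits check below
 * already guarded the group mask the same way.
 */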


@@ -614,9 +614,9 @@ static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
switch (reg_subtype) {
case KVM_REG_RISCV_ISA_SINGLE:
return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
case KVM_REG_RISCV_SBI_MULTI_EN:
case KVM_REG_RISCV_ISA_MULTI_EN:
return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
case KVM_REG_RISCV_SBI_MULTI_DIS:
case KVM_REG_RISCV_ISA_MULTI_DIS:
return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
default:
return -ENOENT;


@@ -234,18 +234,19 @@ static void __init setup_bootmem(void)
kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base;
/*
* memblock allocator is not aware of the fact that last 4K bytes of
* the addressable memory can not be mapped because of IS_ERR_VALUE
* macro. Make sure that last 4k bytes are not usable by memblock
* if end of dram is equal to maximum addressable memory. For 64-bit
* kernel, this problem can't happen here as the end of the virtual
* address space is occupied by the kernel mapping then this check must
* be done as soon as the kernel mapping base address is determined.
* Reserve physical address space that would be mapped to virtual
* addresses greater than (void *)(-PAGE_SIZE) because:
* - This memory would overlap with ERR_PTR
* - This memory belongs to high memory, which is not supported
*
* This is not applicable to 64-bit kernel, because virtual addresses
* after (void *)(-PAGE_SIZE) are not linearly mapped: they are
* occupied by kernel mapping. Also it is unrealistic for high memory
* to exist on 64-bit platforms.
*/
if (!IS_ENABLED(CONFIG_64BIT)) {
max_mapped_addr = __pa(~(ulong)0);
if (max_mapped_addr == (phys_ram_end - 1))
memblock_set_current_limit(max_mapped_addr - 4096);
max_mapped_addr = __va_to_pa_nodebug(-PAGE_SIZE);
memblock_reserve(max_mapped_addr, (phys_addr_t)-max_mapped_addr);
}
min_low_pfn = PFN_UP(phys_ram_base);
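
The reservation exists because of the errno-in-pointer convention: IS_ERR_VALUE() claims the top 4 KiB of the address space as encoded error codes, so the linear map must never produce pointers there. A hedged illustration using the stock definitions (MAX_ERRNO is 4095 in include/linux/err.h):

#define IS_ERR_VALUE(x) ((unsigned long)(void *)(x) >= (unsigned long)-4095)

void *p = (void *)-12;	/* bit-identical to ERR_PTR(-ENOMEM) */
/* On 32-bit this is 0xfffffff4, inside the last page of the VA space,
 * so a real mapping there could never be told apart from an error. */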


@@ -387,17 +387,33 @@ int set_direct_map_default_noflush(struct page *page)
}
#ifdef CONFIG_DEBUG_PAGEALLOC
static int debug_pagealloc_set_page(pte_t *pte, unsigned long addr, void *data)
{
int enable = *(int *)data;
unsigned long val = pte_val(ptep_get(pte));
if (enable)
val |= _PAGE_PRESENT;
else
val &= ~_PAGE_PRESENT;
set_pte(pte, __pte(val));
return 0;
}
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
if (!debug_pagealloc_enabled())
return;
if (enable)
__set_memory((unsigned long)page_address(page), numpages,
__pgprot(_PAGE_PRESENT), __pgprot(0));
else
__set_memory((unsigned long)page_address(page), numpages,
__pgprot(0), __pgprot(_PAGE_PRESENT));
unsigned long start = (unsigned long)page_address(page);
unsigned long size = PAGE_SIZE * numpages;
apply_to_existing_page_range(&init_mm, start, size, debug_pagealloc_set_page, &enable);
flush_tlb_kernel_range(start, start + size);
}
#endif
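
For context on the caller side: with CONFIG_DEBUG_PAGEALLOC the page allocator unmaps pages as they are freed and maps them again on allocation, so a stale pointer faults instead of silently reading reused memory. Roughly, paraphrased from the mm internals (not part of this diff):

static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
{
	if (debug_pagealloc_enabled_static())
		__kernel_map_pages(page, numpages, 0);	/* clear _PAGE_PRESENT */
}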


@@ -117,9 +117,9 @@ vmlinux-objs-$(CONFIG_UNACCEPTED_MEMORY) += $(obj)/mem.o
vmlinux-objs-$(CONFIG_EFI) += $(obj)/efi.o
vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_mixed.o
vmlinux-objs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
vmlinux-libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
$(obj)/vmlinux: $(vmlinux-objs-y) FORCE
$(obj)/vmlinux: $(vmlinux-objs-y) $(vmlinux-libs-y) FORCE
$(call if_changed,ld)
OBJCOPYFLAGS_vmlinux.bin := -R .comment -S


@@ -119,8 +119,8 @@ static void init_heap(void)
char *stack_end;
if (boot_params.hdr.loadflags & CAN_USE_HEAP) {
asm("leal %P1(%%esp),%0"
: "=r" (stack_end) : "i" (-STACK_SIZE));
asm("leal %n1(%%esp),%0"
: "=r" (stack_end) : "i" (STACK_SIZE));
heap_end = (char *)
((size_t)boot_params.hdr.heap_end_ptr + 0x200);
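
The %n operand modifier prints a constant negated and without immediate punctuation, so passing the positive STACK_SIZE and letting %n negate it emits the same instruction the old %P form produced from a negative operand. A standalone sketch, with 1024 standing in for STACK_SIZE:

	char *stack_end;

	/* Emits: leal -1024(%esp), <reg> */
	asm("leal %n1(%%esp),%0" : "=r" (stack_end) : "i" (1024));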


@@ -288,10 +288,10 @@ static inline int alternatives_text_reserved(void *start, void *end)
* Otherwise, if CPU has feature1, newinstr1 is used.
* Otherwise, oldinstr is used.
*/
#define alternative_input_2(oldinstr, newinstr1, ft_flags1, newinstr2, \
ft_flags2, input...) \
asm_inline volatile(ALTERNATIVE_2(oldinstr, newinstr1, ft_flags1, \
newinstr2, ft_flags2) \
#define alternative_input_2(oldinstr, newinstr1, ft_flags1, newinstr2, \
ft_flags2, input...) \
asm_inline volatile(ALTERNATIVE_2(oldinstr, newinstr1, ft_flags1, \
newinstr2, ft_flags2) \
: : "i" (0), ## input)
/* Like alternative_input, but with a single output argument */
@@ -301,7 +301,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
/* Like alternative_io, but for replacing a direct call with another one. */
#define alternative_call(oldfunc, newfunc, ft_flags, output, input...) \
asm_inline volatile (ALTERNATIVE("call %P[old]", "call %P[new]", ft_flags) \
asm_inline volatile (ALTERNATIVE("call %c[old]", "call %c[new]", ft_flags) \
: output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input)
/*
@@ -310,12 +310,12 @@ static inline int alternatives_text_reserved(void *start, void *end)
* Otherwise, if CPU has feature1, function1 is used.
* Otherwise, old function is used.
*/
#define alternative_call_2(oldfunc, newfunc1, ft_flags1, newfunc2, ft_flags2, \
output, input...) \
asm_inline volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", ft_flags1,\
"call %P[new2]", ft_flags2) \
: output, ASM_CALL_CONSTRAINT \
: [old] "i" (oldfunc), [new1] "i" (newfunc1), \
#define alternative_call_2(oldfunc, newfunc1, ft_flags1, newfunc2, ft_flags2, \
output, input...) \
asm_inline volatile (ALTERNATIVE_2("call %c[old]", "call %c[new1]", ft_flags1, \
"call %c[new2]", ft_flags2) \
: output, ASM_CALL_CONSTRAINT \
: [old] "i" (oldfunc), [new1] "i" (newfunc1), \
[new2] "i" (newfunc2), ## input)
/*


@@ -24,7 +24,7 @@ typedef struct {
#ifdef CONFIG_X86_CMPXCHG64
#define __alternative_atomic64(f, g, out, in...) \
asm volatile("call %P[func]" \
asm volatile("call %c[func]" \
: out : [func] "i" (atomic64_##g##_cx8), ## in)
#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8)
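
This hunk and the surrounding x86 ones make the same %P -> %c substitution. With an "i" constraint, a bare %[op] prints AT&T immediate syntax ($sym), which is not a valid direct-call target; %c, like the older %P, suppresses that punctuation, and it is the form GCC and clang handle consistently. A standalone sketch (hook() is a hypothetical function):

extern void hook(void);

static inline void call_hook(void)
{
	/* "%c[fn]" prints the bare symbol, emitting "call hook";
	 * plain "%[fn]" would emit "call $hook" and fail to assemble. */
	asm volatile("call %c[fn]" : : [fn] "i" (hook));
}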


@@ -207,7 +207,7 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
static __always_inline bool _static_cpu_has(u16 bit)
{
asm goto(
ALTERNATIVE_TERNARY("jmp 6f", %P[feature], "", "jmp %l[t_no]")
ALTERNATIVE_TERNARY("jmp 6f", %c[feature], "", "jmp %l[t_no]")
".pushsection .altinstr_aux,\"ax\"\n"
"6:\n"
" testb %[bitnum]," _ASM_RIP(%P[cap_byte]) "\n"


@@ -100,7 +100,7 @@
}
#define ASM_CALL_ARG0 \
"call %P[__func] \n" \
"call %c[__func] \n" \
ASM_REACHABLE
#define ASM_CALL_ARG1 \


@@ -78,10 +78,10 @@ extern int __get_user_bad(void);
int __ret_gu; \
register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
__chk_user_ptr(ptr); \
asm volatile("call __" #fn "_%P4" \
asm volatile("call __" #fn "_%c[size]" \
: "=a" (__ret_gu), "=r" (__val_gu), \
ASM_CALL_CONSTRAINT \
: "0" (ptr), "i" (sizeof(*(ptr)))); \
: "0" (ptr), [size] "i" (sizeof(*(ptr)))); \
instrument_get_user(__val_gu); \
(x) = (__force __typeof__(*(ptr))) __val_gu; \
__builtin_expect(__ret_gu, 0); \
@@ -177,7 +177,7 @@ extern void __put_user_nocheck_8(void);
__chk_user_ptr(__ptr); \
__ptr_pu = __ptr; \
__val_pu = __x; \
asm volatile("call __" #fn "_%P[size]" \
asm volatile("call __" #fn "_%c[size]" \
: "=c" (__ret_pu), \
ASM_CALL_CONSTRAINT \
: "0" (__ptr_pu), \


@@ -222,7 +222,14 @@ out:
int amd_smn_read(u16 node, u32 address, u32 *value)
{
return __amd_smn_rw(node, address, value, false);
int err = __amd_smn_rw(node, address, value, false);
if (PCI_POSSIBLE_ERROR(*value)) {
err = -ENODEV;
*value = 0;
}
return err;
}
EXPORT_SYMBOL_GPL(amd_smn_read);
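
The added check catches aborted accesses: a PCI read the fabric fails completes with all data lines high, which include/linux/pci.h encodes roughly as:

#define PCI_ERROR_RESPONSE	(~0ULL)
#define PCI_POSSIBLE_ERROR(val)	((val) == ((typeof(val))PCI_ERROR_RESPONSE))

An all-ones SMN read is at best ambiguous, so it is now zeroed and reported as -ENODEV instead of being handed to callers as data.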


@@ -298,8 +298,15 @@ void machine_kexec_cleanup(struct kimage *image)
void machine_kexec(struct kimage *image)
{
unsigned long page_list[PAGES_NR];
void *control_page;
unsigned int host_mem_enc_active;
int save_ftrace_enabled;
void *control_page;
/*
* This must be done before load_segments() since if call depth tracking
* is used then GS must be valid to make any function calls.
*/
host_mem_enc_active = cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT);
#ifdef CONFIG_KEXEC_JUMP
if (image->preserve_context)
@@ -361,7 +368,7 @@ void machine_kexec(struct kimage *image)
(unsigned long)page_list,
image->start,
image->preserve_context,
cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT));
host_mem_enc_active);
#ifdef CONFIG_KEXEC_JUMP
if (image->preserve_context)


@@ -788,6 +788,13 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
csv2_sync_reset_vmsa(svm);
}
/*
* SEV-ES guest mandates LBR Virtualization to be _always_ ON. Enable it
* only after setting guest_state_protected because KVM_SET_MSRS allows
* dynamic toggling of LBRV (for performance reason) on write access to
* MSR_IA32_DEBUGCTLMSR when guest_state_protected is not set.
*/
svm_enable_lbrv(vcpu);
return 0;
}
@@ -2404,6 +2411,12 @@ void __init sev_hardware_setup(void)
if (!boot_cpu_has(X86_FEATURE_SEV_ES))
goto out;
if (!lbrv) {
WARN_ONCE(!boot_cpu_has(X86_FEATURE_LBRV),
"LBRV must be present for SEV-ES support");
goto out;
}
if (is_x86_vendor_hygon()) {
/*
* The ASIDs from 1 to max_sev_asid are available for hygon
@@ -3211,7 +3224,6 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
struct kvm_vcpu *vcpu = &svm->vcpu;
svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE;
svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
/*
* An SEV-ES guest requires a VMSA area that is a separate from the
@@ -3263,10 +3275,6 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
/* Clear intercepts on selected MSRs */
set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}
void sev_init_vmcb(struct vcpu_svm *svm)


@@ -103,6 +103,7 @@ static const struct svm_direct_access_msrs {
{ .index = MSR_IA32_SPEC_CTRL, .always = false },
{ .index = MSR_IA32_PRED_CMD, .always = false },
{ .index = MSR_IA32_FLUSH_CMD, .always = false },
{ .index = MSR_IA32_DEBUGCTLMSR, .always = false },
{ .index = MSR_IA32_LASTBRANCHFROMIP, .always = false },
{ .index = MSR_IA32_LASTBRANCHTOIP, .always = false },
{ .index = MSR_IA32_LASTINTFROMIP, .always = false },
@@ -219,7 +220,7 @@ int vgif = true;
module_param(vgif, int, 0444);
/* enable/disable LBR virtualization */
static int lbrv = true;
int lbrv = true;
module_param(lbrv, int, 0444);
static int tsc_scaling = true;
@@ -1013,7 +1014,7 @@ void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
vmcb_mark_dirty(to_vmcb, VMCB_LBR);
}
static void svm_enable_lbrv(struct kvm_vcpu *vcpu)
void svm_enable_lbrv(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -1023,6 +1024,9 @@ static void svm_enable_lbrv(struct kvm_vcpu *vcpu)
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
if (sev_es_guest(vcpu->kvm))
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_DEBUGCTLMSR, 1, 1);
/* Move the LBR msrs to the vmcb02 so that the guest can see them. */
if (is_guest_mode(vcpu))
svm_copy_lbrs(svm->vmcb, svm->vmcb01.ptr);
@@ -1032,6 +1036,8 @@ static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
KVM_BUG_ON(sev_es_guest(vcpu->kvm), vcpu->kvm);
svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
@@ -5316,6 +5322,12 @@ static __init int svm_hardware_setup(void)
nrips = nrips && boot_cpu_has(X86_FEATURE_NRIPS);
if (lbrv) {
if (!boot_cpu_has(X86_FEATURE_LBRV))
lbrv = false;
else
pr_info("LBR virtualization supported\n");
}
/*
* Note, SEV setup consumes npt_enabled and enable_mmio_caching (which
* may be modified by svm_adjust_mmio_mask()), as well as nrips.
@@ -5369,14 +5381,6 @@ static __init int svm_hardware_setup(void)
svm_x86_ops.set_vnmi_pending = NULL;
}
if (lbrv) {
if (!boot_cpu_has(X86_FEATURE_LBRV))
lbrv = false;
else
pr_info("LBR virtualization supported\n");
}
if (!enable_pmu)
pr_info("PMU virtualization is disabled\n");


@@ -30,7 +30,7 @@
#define IOPM_SIZE PAGE_SIZE * 3
#define MSRPM_SIZE PAGE_SIZE * 2
#define MAX_DIRECT_ACCESS_MSRS 47
#define MAX_DIRECT_ACCESS_MSRS 48
#define MSRPM_OFFSETS 32
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
@@ -39,6 +39,7 @@ extern int vgif;
extern bool intercept_smi;
extern bool x2avic_enabled;
extern bool vnmi;
extern int lbrv;
/*
* Clean bits in VMCB.
@@ -548,6 +549,7 @@ u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
void svm_enable_lbrv(struct kvm_vcpu *vcpu);
void svm_update_lbrv(struct kvm_vcpu *vcpu);
int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);


@@ -44,7 +44,11 @@
or %rdx, %rax
.else
cmp $TASK_SIZE_MAX-\size+1, %eax
.if \size != 8
jae .Lbad_get_user
.else
jae .Lbad_get_user_8
.endif
sbb %edx, %edx /* array_index_mask_nospec() */
and %edx, %eax
.endif
@@ -154,7 +158,7 @@ SYM_CODE_END(__get_user_handle_exception)
#ifdef CONFIG_X86_32
SYM_CODE_START_LOCAL(__get_user_8_handle_exception)
ASM_CLAC
bad_get_user_8:
.Lbad_get_user_8:
xor %edx,%edx
xor %ecx,%ecx
mov $(-EFAULT),%_ASM_AX


@@ -183,7 +183,7 @@ static void blk_flush_complete_seq(struct request *rq,
/* queue for flush */
if (list_empty(pending))
fq->flush_pending_since = jiffies;
list_move_tail(&rq->queuelist, pending);
list_add_tail(&rq->queuelist, pending);
break;
case REQ_FSEQ_DATA:
@@ -261,6 +261,7 @@ static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
unsigned int seq = blk_flush_cur_seq(rq);
BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
list_del_init(&rq->queuelist);
blk_flush_complete_seq(rq, fq, seq, error);
}


@@ -313,7 +313,7 @@ static int read_sed_opal_key(const char *key_name, u_char *buffer, int buflen)
&key_type_user, key_name, true);
if (IS_ERR(kref))
ret = PTR_ERR(kref);
return PTR_ERR(kref);
key = key_ref_to_ptr(kref);
down_read(&key->sem);


@@ -198,16 +198,16 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s
}
/*
* AMD systems from Renoir and Lucienne *require* that the NVME controller
* AMD systems from Renoir onwards *require* that the NVME controller
* is put into D3 over a Modern Standby / suspend-to-idle cycle.
*
* This is "typically" accomplished using the `StorageD3Enable`
* property in the _DSD that is checked via the `acpi_storage_d3` function
* but this property was introduced after many of these systems launched
* and most OEM systems don't have it in their BIOS.
* but some OEM systems still don't have it in their BIOS.
*
* The Microsoft documentation for StorageD3Enable mentioned that Windows has
* a hardcoded allowlist for D3 support, which was used for these platforms.
* a hardcoded allowlist for D3 support as well as a registry key to override
* the BIOS, which has been used for these cases.
*
* This allows quirking on Linux in a similar fashion.
*
@@ -220,19 +220,15 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s
* https://bugzilla.kernel.org/show_bug.cgi?id=216773
* https://bugzilla.kernel.org/show_bug.cgi?id=217003
* 2) On at least one HP system StorageD3Enable is missing on the second NVME
disk in the system.
* disk in the system.
* 3) On at least one HP Rembrandt system StorageD3Enable is missing on the only
* NVME device.
*/
static const struct x86_cpu_id storage_d3_cpu_ids[] = {
X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 24, NULL), /* Picasso */
X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 96, NULL), /* Renoir */
X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 104, NULL), /* Lucienne */
X86_MATCH_VENDOR_FAM_MODEL(AMD, 25, 80, NULL), /* Cezanne */
{}
};
bool force_storage_d3(void)
{
return x86_match_cpu(storage_d3_cpu_ids);
if (!cpu_feature_enabled(X86_FEATURE_ZEN))
return false;
return acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0;
}
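
With the model allowlist gone, any Zen CPU on a platform whose FADT advertises low-power S0 idle gets the NVMe D3 behavior. For context, a hedged sketch of the consumer: acpi_storage_d3() in drivers/acpi/device_pm.c checks this quirk before falling back to the _DSD property, roughly:

bool acpi_storage_d3(struct device *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);
	u8 val;

	if (force_storage_d3())
		return true;
	if (!adev)
		return false;
	if (fwnode_property_read_u8(acpi_fwnode_handle(adev),
				    "StorageD3Enable", &val))
		return false;
	return val == 1;
}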
/*


@@ -2664,8 +2664,11 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
if (!env)
return -ENOMEM;
/* Synchronize with really_probe() */
device_lock(dev);
/* let the kset specific function add its keys */
retval = kset->uevent_ops->uevent(&dev->kobj, env);
device_unlock(dev);
if (retval)
goto out;


@@ -112,7 +112,7 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
dev->zone_max_open = dev->zone_max_active;
pr_info("changed the maximum number of open zones to %u\n",
dev->nr_zones);
dev->zone_max_open);
} else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
dev->zone_max_open = 0;
pr_info("zone_max_open limit disabled, limit >= zone count\n");


@@ -144,7 +144,7 @@ void clkdev_add_table(struct clk_lookup *cl, size_t num)
mutex_unlock(&clocks_mutex);
}
#define MAX_DEV_ID 20
#define MAX_DEV_ID 24
#define MAX_CON_ID 16
struct clk_lookup_alloc {


@@ -4,7 +4,6 @@
* Copyright (C) 2020 Zong Li
*/
#include <linux/clkdev.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -536,13 +535,6 @@ static int __prci_register_clocks(struct device *dev, struct __prci_data *pd,
return r;
}
r = clk_hw_register_clkdev(&pic->hw, pic->name, dev_name(dev));
if (r) {
dev_warn(dev, "Failed to register clkdev for %s: %d\n",
init.name, r);
return r;
}
pd->hw_clks.hws[i] = &pic->hw;
}


@@ -2186,15 +2186,6 @@ static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
struct device *dev;
int rc;
switch (mode) {
case CXL_DECODER_RAM:
case CXL_DECODER_PMEM:
break;
default:
dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode);
return ERR_PTR(-EINVAL);
}
cxlr = cxl_region_alloc(cxlrd, id);
if (IS_ERR(cxlr))
return cxlr;
@@ -2245,6 +2236,15 @@ static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
{
int rc;
switch (mode) {
case CXL_DECODER_RAM:
case CXL_DECODER_PMEM:
break;
default:
dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode);
return ERR_PTR(-EINVAL);
}
rc = memregion_alloc(GFP_KERNEL);
if (rc < 0)
return ERR_PTR(rc);


@@ -540,6 +540,12 @@ static int race_signal_callback(void *arg)
t[i].before = pass;
t[i].task = kthread_run(thread_signal_callback, &t[i],
"dma-fence:%d", i);
if (IS_ERR(t[i].task)) {
ret = PTR_ERR(t[i].task);
while (--i >= 0)
kthread_stop_put(t[i].task);
return ret;
}
get_task_struct(t[i].task);
}


@@ -1033,8 +1033,8 @@ static int axi_dmac_remove(struct platform_device *pdev)
{
struct axi_dmac *dmac = platform_get_drvdata(pdev);
of_dma_controller_free(pdev->dev.of_node);
free_irq(dmac->irq, dmac);
of_dma_controller_free(pdev->dev.of_node);
tasklet_kill(&dmac->chan.vchan.task);
dma_async_device_unregister(&dmac->dma_dev);
clk_disable_unprepare(dmac->clk);


@@ -498,13 +498,14 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
ret = qcom_scm_bw_enable();
if (ret)
return ret;
goto disable_clk;
desc.args[1] = mdata_phys;
ret = qcom_scm_call(__scm->dev, &desc, &res);
qcom_scm_bw_disable();
disable_clk:
qcom_scm_clk_disable();
out:
@@ -566,10 +567,12 @@ int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
ret = qcom_scm_bw_enable();
if (ret)
return ret;
goto disable_clk;
ret = qcom_scm_call(__scm->dev, &desc, &res);
qcom_scm_bw_disable();
disable_clk:
qcom_scm_clk_disable();
return ret ? : res.result[0];
@@ -601,10 +604,12 @@ int qcom_scm_pas_auth_and_reset(u32 peripheral)
ret = qcom_scm_bw_enable();
if (ret)
return ret;
goto disable_clk;
ret = qcom_scm_call(__scm->dev, &desc, &res);
qcom_scm_bw_disable();
disable_clk:
qcom_scm_clk_disable();
return ret ? : res.result[0];
@@ -635,11 +640,12 @@ int qcom_scm_pas_shutdown(u32 peripheral)
ret = qcom_scm_bw_enable();
if (ret)
return ret;
goto disable_clk;
ret = qcom_scm_call(__scm->dev, &desc, &res);
qcom_scm_bw_disable();
disable_clk:
qcom_scm_clk_disable();
return ret ? : res.result[0];


@@ -1507,7 +1507,7 @@ config GPIO_TPS68470
are "output only" GPIOs.
config GPIO_TQMX86
tristate "TQ-Systems QTMX86 GPIO"
tristate "TQ-Systems TQMx86 GPIO"
depends on MFD_TQMX86 || COMPILE_TEST
depends on HAS_IOPORT_MAP
select GPIOLIB_IRQCHIP


@@ -6,6 +6,7 @@
* Vadim V.Vlasov <vvlasov@dev.rtsoft.ru>
*/
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/gpio/driver.h>
@@ -28,16 +29,25 @@
#define TQMX86_GPIIC 3 /* GPI Interrupt Configuration Register */
#define TQMX86_GPIIS 4 /* GPI Interrupt Status Register */
#define TQMX86_GPII_NONE 0
#define TQMX86_GPII_FALLING BIT(0)
#define TQMX86_GPII_RISING BIT(1)
/* Stored in irq_type as a trigger type, but not actually valid as a register
* value, so the name doesn't use "GPII"
*/
#define TQMX86_INT_BOTH (BIT(0) | BIT(1))
#define TQMX86_GPII_MASK (BIT(0) | BIT(1))
#define TQMX86_GPII_BITS 2
/* Stored in irq_type with GPII bits */
#define TQMX86_INT_UNMASKED BIT(2)
struct tqmx86_gpio_data {
struct gpio_chip chip;
void __iomem *io_base;
int irq;
/* Lock must be held for accessing output and irq_type fields */
raw_spinlock_t spinlock;
DECLARE_BITMAP(output, TQMX86_NGPIO);
u8 irq_type[TQMX86_NGPI];
};
@@ -64,15 +74,10 @@ static void tqmx86_gpio_set(struct gpio_chip *chip, unsigned int offset,
{
struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
unsigned long flags;
u8 val;
raw_spin_lock_irqsave(&gpio->spinlock, flags);
val = tqmx86_gpio_read(gpio, TQMX86_GPIOD);
if (value)
val |= BIT(offset);
else
val &= ~BIT(offset);
tqmx86_gpio_write(gpio, val, TQMX86_GPIOD);
__assign_bit(offset, gpio->output, value);
tqmx86_gpio_write(gpio, bitmap_get_value8(gpio->output, 0), TQMX86_GPIOD);
raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
}
@@ -107,21 +112,38 @@ static int tqmx86_gpio_get_direction(struct gpio_chip *chip,
return GPIO_LINE_DIRECTION_OUT;
}
static void tqmx86_gpio_irq_config(struct tqmx86_gpio_data *gpio, int offset)
__must_hold(&gpio->spinlock)
{
u8 type = TQMX86_GPII_NONE, gpiic;
if (gpio->irq_type[offset] & TQMX86_INT_UNMASKED) {
type = gpio->irq_type[offset] & TQMX86_GPII_MASK;
if (type == TQMX86_INT_BOTH)
type = tqmx86_gpio_get(&gpio->chip, offset + TQMX86_NGPO)
? TQMX86_GPII_FALLING
: TQMX86_GPII_RISING;
}
gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
gpiic &= ~(TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS));
gpiic |= type << (offset * TQMX86_GPII_BITS);
tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
}
static void tqmx86_gpio_irq_mask(struct irq_data *data)
{
unsigned int offset = (data->hwirq - TQMX86_NGPO);
struct tqmx86_gpio_data *gpio = gpiochip_get_data(
irq_data_get_irq_chip_data(data));
unsigned long flags;
u8 gpiic, mask;
mask = TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS);
raw_spin_lock_irqsave(&gpio->spinlock, flags);
gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
gpiic &= ~mask;
tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
gpio->irq_type[offset] &= ~TQMX86_INT_UNMASKED;
tqmx86_gpio_irq_config(gpio, offset);
raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
gpiochip_disable_irq(&gpio->chip, irqd_to_hwirq(data));
}
@@ -131,16 +153,12 @@ static void tqmx86_gpio_irq_unmask(struct irq_data *data)
struct tqmx86_gpio_data *gpio = gpiochip_get_data(
irq_data_get_irq_chip_data(data));
unsigned long flags;
u8 gpiic, mask;
mask = TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS);
gpiochip_enable_irq(&gpio->chip, irqd_to_hwirq(data));
raw_spin_lock_irqsave(&gpio->spinlock, flags);
gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
gpiic &= ~mask;
gpiic |= gpio->irq_type[offset] << (offset * TQMX86_GPII_BITS);
tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
gpio->irq_type[offset] |= TQMX86_INT_UNMASKED;
tqmx86_gpio_irq_config(gpio, offset);
raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
}
@@ -151,7 +169,7 @@ static int tqmx86_gpio_irq_set_type(struct irq_data *data, unsigned int type)
unsigned int offset = (data->hwirq - TQMX86_NGPO);
unsigned int edge_type = type & IRQF_TRIGGER_MASK;
unsigned long flags;
u8 new_type, gpiic;
u8 new_type;
switch (edge_type) {
case IRQ_TYPE_EDGE_RISING:
@@ -161,19 +179,16 @@ static int tqmx86_gpio_irq_set_type(struct irq_data *data, unsigned int type)
new_type = TQMX86_GPII_FALLING;
break;
case IRQ_TYPE_EDGE_BOTH:
new_type = TQMX86_GPII_FALLING | TQMX86_GPII_RISING;
new_type = TQMX86_INT_BOTH;
break;
default:
return -EINVAL; /* not supported */
}
gpio->irq_type[offset] = new_type;
raw_spin_lock_irqsave(&gpio->spinlock, flags);
gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
gpiic &= ~((TQMX86_GPII_MASK) << (offset * TQMX86_GPII_BITS));
gpiic |= new_type << (offset * TQMX86_GPII_BITS);
tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
gpio->irq_type[offset] &= ~TQMX86_GPII_MASK;
gpio->irq_type[offset] |= new_type;
tqmx86_gpio_irq_config(gpio, offset);
raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
return 0;
@@ -184,8 +199,8 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc)
struct gpio_chip *chip = irq_desc_get_handler_data(desc);
struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
struct irq_chip *irq_chip = irq_desc_get_chip(desc);
unsigned long irq_bits;
int i = 0;
unsigned long irq_bits, flags;
int i;
u8 irq_status;
chained_irq_enter(irq_chip, desc);
@@ -194,6 +209,34 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc)
tqmx86_gpio_write(gpio, irq_status, TQMX86_GPIIS);
irq_bits = irq_status;
raw_spin_lock_irqsave(&gpio->spinlock, flags);
for_each_set_bit(i, &irq_bits, TQMX86_NGPI) {
/*
* Edge-both triggers are implemented by flipping the edge
* trigger after each interrupt, as the controller only supports
* either rising or falling edge triggers, but not both.
*
* Internally, the TQMx86 GPIO controller has separate status
* registers for rising and falling edge interrupts. GPIIC
* configures which bits from which register are visible in the
* interrupt status register GPIIS and defines what triggers the
* parent IRQ line. Writing to GPIIS always clears both rising
* and falling interrupt flags internally, regardless of the
* currently configured trigger.
*
* In consequence, we can cleanly implement the edge-both
* trigger in software by first clearing the interrupt and then
* setting the new trigger based on the current GPIO input in
* tqmx86_gpio_irq_config() - even if an edge arrives between
* reading the input and setting the trigger, we will have a new
* interrupt pending.
*/
if ((gpio->irq_type[i] & TQMX86_GPII_MASK) == TQMX86_INT_BOTH)
tqmx86_gpio_irq_config(gpio, i);
}
raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
for_each_set_bit(i, &irq_bits, TQMX86_NGPI)
generic_handle_domain_irq(gpio->chip.irq.domain,
i + TQMX86_NGPO);
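
Concretely, the emulation cycles through states like this (illustrative trace for one GPI configured as IRQ_TYPE_EDGE_BOTH):

/* pin reads 1 -> tqmx86_gpio_irq_config() programs a FALLING trigger
 * falling edge -> handler clears GPIIS, reads pin == 0, programs RISING
 * rising edge  -> handler reads pin == 1, programs FALLING again, ...
 * An edge that lands between the input read and the trigger write still
 * pends, because GPIIS was cleared before the trigger was flipped.
 */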
@@ -277,6 +320,13 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
tqmx86_gpio_write(gpio, (u8)~TQMX86_DIR_INPUT_MASK, TQMX86_GPIODD);
/*
* Reading the previous output state is not possible with TQMx86 hardware.
* Initialize all outputs to 0 to have a defined state that matches the
* shadow register.
*/
tqmx86_gpio_write(gpio, 0, TQMX86_GPIOD);
chip = &gpio->chip;
chip->label = "gpio-tqmx86";
chip->owner = THIS_MODULE;


@@ -259,7 +259,7 @@
u32 avail_scalers;
pipe_st = komeda_pipeline_get_state(c->pipeline, state);
if (!pipe_st)
if (IS_ERR_OR_NULL(pipe_st))
return NULL;
avail_scalers = (pipe_st->active_comps & KOMEDA_PIPELINE_SCALERS) ^


@@ -360,9 +360,12 @@ EXPORT_SYMBOL(drm_panel_bridge_set_orientation);
static void devm_drm_panel_bridge_release(struct device *dev, void *res)
{
struct drm_bridge **bridge = res;
struct drm_bridge *bridge = *(struct drm_bridge **)res;
drm_panel_bridge_remove(*bridge);
if (!bridge)
return;
drm_bridge_remove(bridge);
}
/**


@@ -610,6 +610,9 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
return ret;
}
if (is_cow_mapping(vma->vm_flags))
return -EINVAL;
dma_resv_lock(shmem->base.resv, NULL);
ret = drm_gem_shmem_get_pages(shmem);
dma_resv_unlock(shmem->base.resv);


@@ -309,6 +309,7 @@ static int vidi_get_modes(struct drm_connector *connector)
struct vidi_context *ctx = ctx_from_connector(connector);
struct edid *edid;
int edid_len;
int count;
/*
* the edid data comes from user side and it would be set
@@ -328,7 +329,11 @@ static int vidi_get_modes(struct drm_connector *connector)
drm_connector_update_edid_property(connector, edid);
return drm_add_edid_modes(connector, edid);
count = drm_add_edid_modes(connector, edid);
kfree(edid);
return count;
}
static const struct drm_connector_helper_funcs vidi_connector_helper_funcs = {


@@ -887,11 +887,11 @@ static int hdmi_get_modes(struct drm_connector *connector)
int ret;
if (!hdata->ddc_adpt)
return 0;
goto no_edid;
edid = drm_get_edid(connector, hdata->ddc_adpt);
if (!edid)
return 0;
goto no_edid;
hdata->dvi_mode = !connector->display_info.is_hdmi;
DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n",
@@ -906,6 +906,9 @@ static int hdmi_get_modes(struct drm_connector *connector)
kfree(edid);
return ret;
no_edid:
return drm_add_modes_noedid(connector, 640, 480);
}
static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)


@@ -1251,17 +1251,6 @@ static const struct component_ops i915_audio_component_bind_ops = {
static void i915_audio_component_init(struct drm_i915_private *i915)
{
u32 aud_freq, aud_freq_init;
int ret;
ret = component_add_typed(i915->drm.dev,
&i915_audio_component_bind_ops,
I915_COMPONENT_AUDIO);
if (ret < 0) {
drm_err(&i915->drm,
"failed to add audio component (%d)\n", ret);
/* continue with reduced functionality */
return;
}
if (DISPLAY_VER(i915) >= 9) {
aud_freq_init = intel_de_read(i915, AUD_FREQ_CNTRL);
@ -1284,6 +1273,21 @@ static void i915_audio_component_init(struct drm_i915_private *i915)
/* init with current cdclk */
intel_audio_cdclk_change_post(i915);
}
static void i915_audio_component_register(struct drm_i915_private *i915)
{
int ret;
ret = component_add_typed(i915->drm.dev,
&i915_audio_component_bind_ops,
I915_COMPONENT_AUDIO);
if (ret < 0) {
drm_err(&i915->drm,
"failed to add audio component (%d)\n", ret);
/* continue with reduced functionality */
return;
}
i915->display.audio.component_registered = true;
}
@@ -1316,6 +1320,12 @@ void intel_audio_init(struct drm_i915_private *i915)
i915_audio_component_init(i915);
}
void intel_audio_register(struct drm_i915_private *i915)
{
if (!i915->display.audio.lpe.platdev)
i915_audio_component_register(i915);
}
/**
* intel_audio_deinit() - deinitialize the audio driver
* @i915: the i915 drm device private data


@@ -28,6 +28,7 @@ void intel_audio_codec_get_config(struct intel_encoder *encoder,
void intel_audio_cdclk_change_pre(struct drm_i915_private *dev_priv);
void intel_audio_cdclk_change_post(struct drm_i915_private *dev_priv);
void intel_audio_init(struct drm_i915_private *dev_priv);
void intel_audio_register(struct drm_i915_private *i915);
void intel_audio_deinit(struct drm_i915_private *dev_priv);
void intel_audio_sdp_split_update(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);


@@ -386,6 +386,8 @@ void intel_display_driver_register(struct drm_i915_private *i915)
intel_audio_init(i915);
intel_audio_register(i915);
intel_display_debugfs_register(i915);
/*


@@ -285,7 +285,9 @@ bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj);
static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
/* TODO: make DPT shrinkable when it has no bound vmas */
return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE) &&
!obj->is_dpt;
}
static inline bool


@@ -258,8 +258,13 @@ static void signal_irq_work(struct irq_work *work)
i915_request_put(rq);
}
/* Lazy irq enabling after HW submission */
if (!READ_ONCE(b->irq_armed) && !list_empty(&b->signalers))
intel_breadcrumbs_arm_irq(b);
/* And confirm that we still want irqs enabled before we yield */
if (READ_ONCE(b->irq_armed) && !atomic_read(&b->active))
intel_breadcrumbs_disarm_irq(b);
}
struct intel_breadcrumbs *
@@ -310,13 +315,7 @@ void __intel_breadcrumbs_park(struct intel_breadcrumbs *b)
return;
/* Kick the work once more to drain the signalers, and disarm the irq */
irq_work_sync(&b->irq_work);
while (READ_ONCE(b->irq_armed) && !atomic_read(&b->active)) {
local_irq_disable();
signal_irq_work(&b->irq_work);
local_irq_enable();
cond_resched();
}
irq_work_queue(&b->irq_work);
}
void intel_breadcrumbs_free(struct kref *kref)
@@ -399,7 +398,7 @@ static void insert_breadcrumb(struct i915_request *rq)
* the request as it may have completed and raised the interrupt as
* we were attaching it into the lists.
*/
if (!b->irq_armed || __i915_request_is_complete(rq))
if (!READ_ONCE(b->irq_armed) || __i915_request_is_complete(rq))
irq_work_queue(&b->irq_work);
}


@@ -643,7 +643,9 @@ static int st7789v_probe(struct spi_device *spi)
if (ret)
return dev_err_probe(dev, ret, "Failed to get backlight\n");
of_drm_get_panel_orientation(spi->dev.of_node, &ctx->orientation);
ret = of_drm_get_panel_orientation(spi->dev.of_node, &ctx->orientation);
if (ret)
return dev_err_probe(&spi->dev, ret, "Failed to get orientation\n");
drm_panel_add(&ctx->panel);


@@ -956,13 +956,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
vmw_read(dev_priv,
SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
/*
* Workaround for low memory 2D VMs to compensate for the
* allocation taken by fbdev
*/
if (!(dev_priv->capabilities & SVGA_CAP_3D))
mem_size *= 3;
dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
dev_priv->max_primary_mem =
vmw_read(dev_priv, SVGA_REG_MAX_PRIMARY_MEM);


@@ -1067,9 +1067,6 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
unsigned width, unsigned height, unsigned pitch,
unsigned bpp, unsigned depth);
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
uint32_t pitch,
uint32_t height);
int vmw_kms_present(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,


@@ -35,6 +35,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
#include <drm/drm_sysfs.h>
#include <drm/drm_edid.h>
void vmw_du_cleanup(struct vmw_display_unit *du)
{
@ -215,7 +216,7 @@ static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
new_image = vmw_du_cursor_plane_acquire_image(new_vps);
changed = false;
if (old_image && new_image)
if (old_image && new_image && old_image != new_image)
changed = memcmp(old_image, new_image, size) != 0;
return changed;
@@ -2150,13 +2151,12 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
return 0;
}
static
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
uint32_t pitch,
uint32_t height)
u64 pitch,
u64 height)
{
return ((u64) pitch * (u64) height) < (u64)
((dev_priv->active_display_unit == vmw_du_screen_target) ?
dev_priv->max_primary_mem : dev_priv->vram_size);
return (pitch * height) < (u64)dev_priv->vram_size;
}
/**
@@ -2279,107 +2279,6 @@ vmw_du_connector_detect(struct drm_connector *connector, bool force)
connector_status_connected : connector_status_disconnected);
}
static struct drm_display_mode vmw_kms_connector_builtin[] = {
/* 640x480@60Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 489, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 800x600@60Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
968, 1056, 0, 600, 601, 605, 628, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1024x768@60Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
1184, 1344, 0, 768, 771, 777, 806, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1152x864@75Hz */
{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
1344, 1600, 0, 864, 865, 868, 900, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x720@60Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74500, 1280, 1344,
1472, 1664, 0, 720, 723, 728, 748, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x768@60Hz */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
1472, 1664, 0, 768, 771, 778, 798, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x800@60Hz */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
1480, 1680, 0, 800, 803, 809, 831, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x960@60Hz */
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
1488, 1800, 0, 960, 961, 964, 1000, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x1024@60Hz */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1360x768@60Hz */
{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
1536, 1792, 0, 768, 771, 777, 795, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1440x1050@60Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1440x900@60Hz */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
1672, 1904, 0, 900, 903, 909, 934, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@60Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1680x1050@60Hz */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1792x1344@60Hz */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1856x1392@60Hz */
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1080@60Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 173000, 1920, 2048,
2248, 2576, 0, 1080, 1083, 1088, 1120, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1200@60Hz */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1440@60Hz */
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 2560x1440@60Hz */
{ DRM_MODE("2560x1440", DRM_MODE_TYPE_DRIVER, 241500, 2560, 2608,
2640, 2720, 0, 1440, 1443, 1448, 1481, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 2560x1600@60Hz */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 2880x1800@60Hz */
{ DRM_MODE("2880x1800", DRM_MODE_TYPE_DRIVER, 337500, 2880, 2928,
2960, 3040, 0, 1800, 1803, 1809, 1852, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 3840x2160@60Hz */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 533000, 3840, 3888,
3920, 4000, 0, 2160, 2163, 2168, 2222, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 3840x2400@60Hz */
{ DRM_MODE("3840x2400", DRM_MODE_TYPE_DRIVER, 592250, 3840, 3888,
3920, 4000, 0, 2400, 2403, 2409, 2469, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* Terminate */
{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
};
/**
* vmw_guess_mode_timing - Provide fake timings for a
* 60Hz vrefresh mode.
@ -2401,88 +2300,6 @@ void vmw_guess_mode_timing(struct drm_display_mode *mode)
}
int vmw_du_connector_fill_modes(struct drm_connector *connector,
uint32_t max_width, uint32_t max_height)
{
struct vmw_display_unit *du = vmw_connector_to_du(connector);
struct drm_device *dev = connector->dev;
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_display_mode *mode = NULL;
struct drm_display_mode *bmode;
struct drm_display_mode prefmode = { DRM_MODE("preferred",
DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
};
int i;
u32 assumed_bpp = 4;
if (dev_priv->assume_16bpp)
assumed_bpp = 2;
max_width = min(max_width, dev_priv->texture_max_width);
max_height = min(max_height, dev_priv->texture_max_height);
/*
* For STDU extra limit for a mode on SVGA_REG_SCREENTARGET_MAX_WIDTH/
* HEIGHT registers.
*/
if (dev_priv->active_display_unit == vmw_du_screen_target) {
max_width = min(max_width, dev_priv->stdu_max_width);
max_height = min(max_height, dev_priv->stdu_max_height);
}
/* Add preferred mode */
mode = drm_mode_duplicate(dev, &prefmode);
if (!mode)
return 0;
mode->hdisplay = du->pref_width;
mode->vdisplay = du->pref_height;
vmw_guess_mode_timing(mode);
drm_mode_set_name(mode);
if (vmw_kms_validate_mode_vram(dev_priv,
mode->hdisplay * assumed_bpp,
mode->vdisplay)) {
drm_mode_probed_add(connector, mode);
} else {
drm_mode_destroy(dev, mode);
mode = NULL;
}
if (du->pref_mode) {
list_del_init(&du->pref_mode->head);
drm_mode_destroy(dev, du->pref_mode);
}
/* mode might be null here, this is intended */
du->pref_mode = mode;
for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
bmode = &vmw_kms_connector_builtin[i];
if (bmode->hdisplay > max_width ||
bmode->vdisplay > max_height)
continue;
if (!vmw_kms_validate_mode_vram(dev_priv,
bmode->hdisplay * assumed_bpp,
bmode->vdisplay))
continue;
mode = drm_mode_duplicate(dev, bmode);
if (!mode)
return 0;
drm_mode_probed_add(connector, mode);
}
drm_connector_list_update(connector);
/* Move the preferred mode first, to help apps pick the right mode. */
drm_mode_sort(&connector->modes);
return 1;
}
/**
* vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
* @dev: drm device for the ioctl
@ -3023,3 +2840,84 @@ out_unref:
vmw_validation_unref_lists(&val_ctx);
return ret;
}
/**
* vmw_connector_mode_valid - implements drm_connector_helper_funcs.mode_valid callback
*
* @connector: the drm connector, part of a DU container
* @mode: drm mode to check
*
* Returns MODE_OK on success, or a drm_mode_status error code.
*/
enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
enum drm_mode_status ret;
struct drm_device *dev = connector->dev;
struct vmw_private *dev_priv = vmw_priv(dev);
u32 assumed_cpp = 4;
if (dev_priv->assume_16bpp)
assumed_cpp = 2;
ret = drm_mode_validate_size(mode, dev_priv->texture_max_width,
dev_priv->texture_max_height);
if (ret != MODE_OK)
return ret;
if (!vmw_kms_validate_mode_vram(dev_priv,
mode->hdisplay * assumed_cpp,
mode->vdisplay))
return MODE_MEM;
return MODE_OK;
}
/**
* vmw_connector_get_modes - implements drm_connector_helper_funcs.get_modes callback
*
* @connector: the drm connector, part of a DU container
*
* Returns the number of added modes.
*/
int vmw_connector_get_modes(struct drm_connector *connector)
{
struct vmw_display_unit *du = vmw_connector_to_du(connector);
struct drm_device *dev = connector->dev;
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_display_mode *mode = NULL;
struct drm_display_mode prefmode = { DRM_MODE("preferred",
DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
};
u32 max_width;
u32 max_height;
u32 num_modes;
/* Add preferred mode */
mode = drm_mode_duplicate(dev, &prefmode);
if (!mode)
return 0;
mode->hdisplay = du->pref_width;
mode->vdisplay = du->pref_height;
vmw_guess_mode_timing(mode);
drm_mode_set_name(mode);
drm_mode_probed_add(connector, mode);
drm_dbg_kms(dev, "preferred mode " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
/* Probe connector for all modes not exceeding our geom limits */
max_width = dev_priv->texture_max_width;
max_height = dev_priv->texture_max_height;
if (dev_priv->active_display_unit == vmw_du_screen_target) {
max_width = min(dev_priv->stdu_max_width, max_width);
max_height = min(dev_priv->stdu_max_height, max_height);
}
num_modes = 1 + drm_add_modes_noedid(connector, max_width, max_height);
return num_modes;
}
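
These two callbacks are consumed by the generic DRM probe helper: drm_helper_probe_single_connector_modes() first calls .get_modes to populate the list (here, the preferred mode plus the no-EDID standard modes), then drops every entry rejected by .mode_valid. A sketch of the wiring this series moves the vmwgfx connectors to (the struct instances are illustrative; the callback names are the ones added above):

	static const struct drm_connector_helper_funcs helper_funcs = {
		.get_modes  = vmw_connector_get_modes,	/* preferred + no-EDID modes */
		.mode_valid = vmw_connector_mode_valid,	/* size and VRAM filter */
	};

	static const struct drm_connector_funcs funcs = {
		/* the generic helper replaces vmw_du_connector_fill_modes */
		.fill_modes = drm_helper_probe_single_connector_modes,
	};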


@ -378,7 +378,6 @@ struct vmw_display_unit {
unsigned pref_width;
unsigned pref_height;
bool pref_active;
struct drm_display_mode *pref_mode;
/*
* Gui positioning
@ -428,8 +427,6 @@ void vmw_du_connector_save(struct drm_connector *connector);
void vmw_du_connector_restore(struct drm_connector *connector);
enum drm_connector_status
vmw_du_connector_detect(struct drm_connector *connector, bool force);
int vmw_du_connector_fill_modes(struct drm_connector *connector,
uint32_t max_width, uint32_t max_height);
int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
const struct drm_clip_rect *clips,
@ -438,6 +435,9 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
int num_clips,
int increment,
struct vmw_kms_dirty *dirty);
enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode);
int vmw_connector_get_modes(struct drm_connector *connector);
void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
struct drm_file *file_priv,


@ -304,7 +304,7 @@ static void vmw_ldu_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs vmw_legacy_connector_funcs = {
.dpms = vmw_du_connector_dpms,
.detect = vmw_du_connector_detect,
.fill_modes = vmw_du_connector_fill_modes,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = vmw_ldu_connector_destroy,
.reset = vmw_du_connector_reset,
.atomic_duplicate_state = vmw_du_connector_duplicate_state,
@ -313,6 +313,8 @@ static const struct drm_connector_funcs vmw_legacy_connector_funcs = {
static const struct
drm_connector_helper_funcs vmw_ldu_connector_helper_funcs = {
.get_modes = vmw_connector_get_modes,
.mode_valid = vmw_connector_mode_valid
};
static int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
@ -449,7 +451,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
ldu->base.pref_active = (unit == 0);
ldu->base.pref_width = dev_priv->initial_width;
ldu->base.pref_height = dev_priv->initial_height;
ldu->base.pref_mode = NULL;
/*
* Remove this after enabling atomic because property values can


@ -347,7 +347,7 @@ static void vmw_sou_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs vmw_sou_connector_funcs = {
.dpms = vmw_du_connector_dpms,
.detect = vmw_du_connector_detect,
.fill_modes = vmw_du_connector_fill_modes,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = vmw_sou_connector_destroy,
.reset = vmw_du_connector_reset,
.atomic_duplicate_state = vmw_du_connector_duplicate_state,
@ -357,6 +357,8 @@ static const struct drm_connector_funcs vmw_sou_connector_funcs = {
static const struct
drm_connector_helper_funcs vmw_sou_connector_helper_funcs = {
.get_modes = vmw_connector_get_modes,
.mode_valid = vmw_connector_mode_valid
};
@ -826,7 +828,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
sou->base.pref_active = (unit == 0);
sou->base.pref_width = dev_priv->initial_width;
sou->base.pref_height = dev_priv->initial_height;
sou->base.pref_mode = NULL;
/*
* Remove this after enabling atomic because property values can


@ -41,7 +41,14 @@
#define vmw_connector_to_stdu(x) \
container_of(x, struct vmw_screen_target_display_unit, base.connector)
/*
* Some renderers such as llvmpipe will align the width and height of their
* buffers to match their tile size. We need to keep this in mind when exposing
* modes to userspace so that this possible over-allocation will not exceed
* graphics memory. 64x64 pixels seems to be a reasonable upper bound for the
* tile size of current renderers.
*/
#define GPU_TILE_SIZE 64
enum stdu_content_type {
SAME_AS_DISPLAY = 0,
@ -825,12 +832,46 @@ static void vmw_stdu_connector_destroy(struct drm_connector *connector)
vmw_stdu_destroy(vmw_connector_to_stdu(connector));
}
static enum drm_mode_status
vmw_stdu_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
enum drm_mode_status ret;
struct drm_device *dev = connector->dev;
struct vmw_private *dev_priv = vmw_priv(dev);
u64 assumed_cpp = dev_priv->assume_16bpp ? 2 : 4;
/* Align width and height to account for GPU tile over-alignment */
u64 required_mem = ALIGN(mode->hdisplay, GPU_TILE_SIZE) *
ALIGN(mode->vdisplay, GPU_TILE_SIZE) *
assumed_cpp;
required_mem = ALIGN(required_mem, PAGE_SIZE);
ret = drm_mode_validate_size(mode, dev_priv->stdu_max_width,
dev_priv->stdu_max_height);
if (ret != MODE_OK)
return ret;
ret = drm_mode_validate_size(mode, dev_priv->texture_max_width,
dev_priv->texture_max_height);
if (ret != MODE_OK)
return ret;
if (required_mem > dev_priv->max_primary_mem)
return MODE_MEM;
if (required_mem > dev_priv->max_mob_pages * PAGE_SIZE)
return MODE_MEM;
if (required_mem > dev_priv->max_mob_size)
return MODE_MEM;
return MODE_OK;
}
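
As a sanity check of the arithmetic above (a standalone recomputation under assumed values, not driver code): a 1920x1080 mode at the assumed 4 bytes per pixel is already 64-aligned horizontally, while 1080 rounds up to 1088 rows, giving 1920 * 1088 * 4 = 8,355,840 bytes, which happens to be exactly page-aligned:

	#include <stdint.h>
	#include <stdio.h>

	#define GPU_TILE_SIZE	64
	#define PAGE_SIZE	4096
	#define ALIGN(x, a)	(((x) + (a) - 1) / (a) * (a))

	int main(void)
	{
		uint64_t cpp = 4;	/* assume_16bpp off */
		uint64_t mem = ALIGN(1920, GPU_TILE_SIZE) *
			       ALIGN(1080, GPU_TILE_SIZE) * cpp;

		mem = ALIGN(mem, PAGE_SIZE);
		printf("required_mem = %llu\n", (unsigned long long)mem);	/* 8355840 */
		return 0;
	}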
static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
.dpms = vmw_du_connector_dpms,
.detect = vmw_du_connector_detect,
.fill_modes = vmw_du_connector_fill_modes,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = vmw_stdu_connector_destroy,
.reset = vmw_du_connector_reset,
.atomic_duplicate_state = vmw_du_connector_duplicate_state,
@ -840,6 +881,8 @@ static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
static const struct
drm_connector_helper_funcs vmw_stdu_connector_helper_funcs = {
.get_modes = vmw_connector_get_modes,
.mode_valid = vmw_stdu_connector_mode_valid
};


@ -694,6 +694,7 @@ static void gb_interface_release(struct device *dev)
trace_gb_interface_release(intf);
cancel_work_sync(&intf->mode_switch_work);
kfree(intf);
}


@ -1448,7 +1448,6 @@ static void implement(const struct hid_device *hid, u8 *report,
hid_warn(hid,
"%s() called with too large value %d (n: %d)! (%s)\n",
__func__, value, n, current->comm);
WARN_ON(1);
value &= m;
}
}


@ -1284,8 +1284,10 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
*/
msleep(50);
if (retval)
if (retval) {
kfree(dj_report);
return retval;
}
}
/*


@ -283,7 +283,9 @@ static struct input_dev *shield_haptics_create(
return haptics;
input_set_capability(haptics, EV_FF, FF_RUMBLE);
input_ff_create_memless(haptics, NULL, play_effect);
ret = input_ff_create_memless(haptics, NULL, play_effect);
if (ret)
goto err;
ret = input_register_device(haptics);
if (ret)


@ -294,6 +294,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xae24),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
/* Meteor Lake-S */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7f26),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
/* Raptor Lake-S */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7a26),
@ -304,6 +309,26 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa76f),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
/* Granite Rapids */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0963),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
/* Granite Rapids SOC */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3256),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
/* Sapphire Rapids SOC */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3456),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
/* Lunar Lake */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa824),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
/* Alder Lake CPU */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),


@ -106,8 +106,7 @@ static int at91_unreg_slave(struct i2c_client *slave)
static u32 at91_twi_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_SLAVE | I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
| I2C_FUNC_SMBUS_READ_BLOCK_DATA;
return I2C_FUNC_SLAVE;
}
static const struct i2c_algorithm at91_twi_algorithm_slave = {


@ -220,7 +220,7 @@ static const struct i2c_algorithm i2c_dw_algo = {
void i2c_dw_configure_slave(struct dw_i2c_dev *dev)
{
dev->functionality = I2C_FUNC_SLAVE | DW_IC_DEFAULT_FUNCTIONALITY;
dev->functionality = I2C_FUNC_SLAVE;
dev->slave_cfg = DW_IC_CON_RX_FIFO_FULL_HLD_CTRL |
DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED;


@ -225,11 +225,11 @@ static void __ad9467_get_scale(struct ad9467_state *st, int index,
}
static const struct iio_chan_spec ad9434_channels[] = {
AD9467_CHAN(0, 0, 12, 'S'),
AD9467_CHAN(0, 0, 12, 's'),
};
static const struct iio_chan_spec ad9467_channels[] = {
AD9467_CHAN(0, 0, 16, 'S'),
AD9467_CHAN(0, 0, 16, 's'),
};
static const struct ad9467_chip_info ad9467_chip_tbl = {


@ -175,6 +175,7 @@ static int adi_axi_adc_probe(struct platform_device *pdev)
struct adi_axi_adc_state *st;
void __iomem *base;
unsigned int ver;
struct clk *clk;
int ret;
st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL);
@ -195,6 +196,10 @@ static int adi_axi_adc_probe(struct platform_device *pdev)
if (!expected_ver)
return -ENODEV;
clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
/*
* Force disable the core. Up to the frontend to enable us. And we can
* still read/write registers...


@ -60,11 +60,15 @@ EXPORT_SYMBOL_NS_GPL(inv_sensors_timestamp_init, IIO_INV_SENSORS_TIMESTAMP);
int inv_sensors_timestamp_update_odr(struct inv_sensors_timestamp *ts,
uint32_t period, bool fifo)
{
uint32_t mult;
/* when FIFO is on, prevent odr change if one is already pending */
if (fifo && ts->new_mult != 0)
return -EAGAIN;
ts->new_mult = period / ts->chip.clock_period;
mult = period / ts->chip.clock_period;
if (mult != ts->mult)
ts->new_mult = mult;
return 0;
}
@ -101,6 +105,9 @@ static bool inv_update_chip_period(struct inv_sensors_timestamp *ts,
static void inv_align_timestamp_it(struct inv_sensors_timestamp *ts)
{
const int64_t period_min = ts->min_period * ts->mult;
const int64_t period_max = ts->max_period * ts->mult;
int64_t add_max, sub_max;
int64_t delta, jitter;
int64_t adjust;
@ -108,11 +115,13 @@ static void inv_align_timestamp_it(struct inv_sensors_timestamp *ts)
delta = ts->it.lo - ts->timestamp;
/* adjust timestamp while respecting jitter */
add_max = period_max - (int64_t)ts->period;
sub_max = period_min - (int64_t)ts->period;
jitter = INV_SENSORS_TIMESTAMP_JITTER((int64_t)ts->period, ts->chip.jitter);
if (delta > jitter)
adjust = jitter;
adjust = add_max;
else if (delta < -jitter)
adjust = -jitter;
adjust = sub_max;
else
adjust = 0;


@ -415,7 +415,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
s64 tmp = *val * (3767897513LL / 25LL);
*val = div_s64_rem(tmp, 1000000000LL, val2);
return IIO_VAL_INT_PLUS_MICRO;
return IIO_VAL_INT_PLUS_NANO;
}
mutex_lock(&st->lock);


@ -129,10 +129,6 @@ static int inv_icm42600_accel_update_scan_mode(struct iio_dev *indio_dev,
/* update data FIFO write */
inv_sensors_timestamp_apply_odr(ts, 0, 0, 0);
ret = inv_icm42600_buffer_set_fifo_en(st, fifo_en | st->fifo.en);
if (ret)
goto out_unlock;
ret = inv_icm42600_buffer_update_watermark(st);
out_unlock:
mutex_unlock(&st->lock);


@ -129,10 +129,6 @@ static int inv_icm42600_gyro_update_scan_mode(struct iio_dev *indio_dev,
/* update data FIFO write */
inv_sensors_timestamp_apply_odr(ts, 0, 0, 0);
ret = inv_icm42600_buffer_set_fifo_en(st, fifo_en | st->fifo.en);
if (ret)
goto out_unlock;
ret = inv_icm42600_buffer_update_watermark(st);
out_unlock:
mutex_unlock(&st->lock);


@ -1692,8 +1692,17 @@ static void __init free_pci_segments(void)
}
}
static void __init free_sysfs(struct amd_iommu *iommu)
{
if (iommu->iommu.dev) {
iommu_device_unregister(&iommu->iommu);
iommu_device_sysfs_remove(&iommu->iommu);
}
}
static void __init free_iommu_one(struct amd_iommu *iommu)
{
free_sysfs(iommu);
free_cwwb_sem(iommu);
free_command_buffer(iommu);
free_event_buffer(iommu);


@ -1849,28 +1849,22 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
int ret = 0;
if (!info->map)
return -EINVAL;
raw_spin_lock(&its_dev->event_map.vlpi_lock);
if (!its_dev->event_map.vm) {
struct its_vlpi_map *maps;
maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
GFP_ATOMIC);
if (!maps) {
ret = -ENOMEM;
goto out;
}
if (!maps)
return -ENOMEM;
its_dev->event_map.vm = info->map->vm;
its_dev->event_map.vlpi_maps = maps;
} else if (its_dev->event_map.vm != info->map->vm) {
ret = -EINVAL;
goto out;
return -EINVAL;
}
/* Get our private copy of the mapping information */
@ -1902,46 +1896,32 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
its_dev->event_map.nr_vlpis++;
}
out:
raw_spin_unlock(&its_dev->event_map.vlpi_lock);
return ret;
return 0;
}
static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
struct its_vlpi_map *map;
int ret = 0;
raw_spin_lock(&its_dev->event_map.vlpi_lock);
map = get_vlpi_map(d);
if (!its_dev->event_map.vm || !map) {
ret = -EINVAL;
goto out;
}
if (!its_dev->event_map.vm || !map)
return -EINVAL;
/* Copy our mapping information to the incoming request */
*info->map = *map;
out:
raw_spin_unlock(&its_dev->event_map.vlpi_lock);
return ret;
return 0;
}
static int its_vlpi_unmap(struct irq_data *d)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
int ret = 0;
raw_spin_lock(&its_dev->event_map.vlpi_lock);
if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
ret = -EINVAL;
goto out;
}
if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
return -EINVAL;
/* Drop the virtual mapping */
its_send_discard(its_dev, event);
@ -1965,9 +1945,7 @@ static int its_vlpi_unmap(struct irq_data *d)
kfree(its_dev->event_map.vlpi_maps);
}
out:
raw_spin_unlock(&its_dev->event_map.vlpi_lock);
return ret;
return 0;
}
static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
@ -1995,6 +1973,8 @@ static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
if (!is_v4(its_dev->its))
return -EINVAL;
guard(raw_spinlock_irq)(&its_dev->event_map.vlpi_lock);
/* Unmap request? */
if (!info)
return its_vlpi_unmap(d);
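
guard(raw_spinlock_irq) comes from the kernel's scope-based cleanup support (linux/cleanup.h plus the lock guards built on top of it): the lock is taken at the declaration and released automatically on every return path, which is what lets the three vlpi helpers above shed their "goto out" unlock labels. A userspace analogue of the idea, using the GCC/Clang cleanup attribute (a sketch, not the kernel implementation):

	#include <pthread.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	static void unlocker(pthread_mutex_t **m) { pthread_mutex_unlock(*m); }

	static int do_work(int fail)
	{
		__attribute__((cleanup(unlocker))) pthread_mutex_t *guard = &lock;

		pthread_mutex_lock(guard);
		if (fail)
			return -1;	/* unlocker() runs here automatically */
		return 0;		/* ...and here */
	}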


@ -17,17 +17,19 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/smp.h>
#include <linux/soc/andes/irq.h>
static struct irq_domain *intc_domain;
static unsigned int riscv_intc_nr_irqs __ro_after_init = BITS_PER_LONG;
static unsigned int riscv_intc_custom_base __ro_after_init = BITS_PER_LONG;
static unsigned int riscv_intc_custom_nr_irqs __ro_after_init;
static asmlinkage void riscv_intc_irq(struct pt_regs *regs)
{
unsigned long cause = regs->cause & ~CAUSE_IRQ_FLAG;
if (unlikely(cause >= BITS_PER_LONG))
panic("unexpected interrupt cause");
generic_handle_domain_irq(intc_domain, cause);
if (generic_handle_domain_irq(intc_domain, cause))
pr_warn_ratelimited("Failed to handle interrupt (cause: %ld)\n", cause);
}
/*
@ -47,6 +49,31 @@ static void riscv_intc_irq_unmask(struct irq_data *d)
csr_set(CSR_IE, BIT(d->hwirq));
}
static void andes_intc_irq_mask(struct irq_data *d)
{
/*
* Andes specific S-mode local interrupt causes (hwirq)
* are defined as (256 + n) and controlled by n-th bit
* of SLIE.
*/
unsigned int mask = BIT(d->hwirq % BITS_PER_LONG);
if (d->hwirq < ANDES_SLI_CAUSE_BASE)
csr_clear(CSR_IE, mask);
else
csr_clear(ANDES_CSR_SLIE, mask);
}
static void andes_intc_irq_unmask(struct irq_data *d)
{
unsigned int mask = BIT(d->hwirq % BITS_PER_LONG);
if (d->hwirq < ANDES_SLI_CAUSE_BASE)
csr_set(CSR_IE, mask);
else
csr_set(ANDES_CSR_SLIE, mask);
}
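
Concretely, taking the comment above at face value (custom causes start at 256, BITS_PER_LONG = 64 on RV64): hwirq 9 stays in CSR_IE at bit 9, hwirq 256 lands in SLIE at bit 0, and hwirq 257 at bit 1. A standalone recomputation:

	#include <stdio.h>

	#define BITS_PER_LONG		64	/* RV64 */
	#define ANDES_SLI_CAUSE_BASE	256

	int main(void)
	{
		unsigned long hwirqs[] = { 9, 256, 257 };

		for (int i = 0; i < 3; i++)
			printf("hwirq %lu -> %s bit %lu\n", hwirqs[i],
			       hwirqs[i] < ANDES_SLI_CAUSE_BASE ? "CSR_IE" : "SLIE",
			       hwirqs[i] % BITS_PER_LONG);
		return 0;
	}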
static void riscv_intc_irq_eoi(struct irq_data *d)
{
/*
@ -70,12 +97,21 @@ static struct irq_chip riscv_intc_chip = {
.irq_eoi = riscv_intc_irq_eoi,
};
static struct irq_chip andes_intc_chip = {
.name = "RISC-V INTC",
.irq_mask = andes_intc_irq_mask,
.irq_unmask = andes_intc_irq_unmask,
.irq_eoi = riscv_intc_irq_eoi,
};
static int riscv_intc_domain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
struct irq_chip *chip = d->host_data;
irq_set_percpu_devid(irq);
irq_domain_set_info(d, irq, hwirq, &riscv_intc_chip, d->host_data,
handle_percpu_devid_irq, NULL, NULL);
irq_domain_set_info(d, irq, hwirq, chip, NULL, handle_percpu_devid_irq,
NULL, NULL);
return 0;
}
@ -93,6 +129,14 @@ static int riscv_intc_domain_alloc(struct irq_domain *domain,
if (ret)
return ret;
/*
* Only allow hwirq for which we have corresponding standard or
* custom interrupt enable register.
*/
if ((hwirq >= riscv_intc_nr_irqs && hwirq < riscv_intc_custom_base) ||
(hwirq >= riscv_intc_custom_base + riscv_intc_custom_nr_irqs))
return -EINVAL;
for (i = 0; i < nr_irqs; i++) {
ret = riscv_intc_domain_map(domain, virq + i, hwirq + i);
if (ret)
@ -113,12 +157,12 @@ static struct fwnode_handle *riscv_intc_hwnode(void)
return intc_domain->fwnode;
}
static int __init riscv_intc_init_common(struct fwnode_handle *fn)
static int __init riscv_intc_init_common(struct fwnode_handle *fn,
struct irq_chip *chip)
{
int rc;
intc_domain = irq_domain_create_linear(fn, BITS_PER_LONG,
&riscv_intc_domain_ops, NULL);
intc_domain = irq_domain_create_tree(fn, &riscv_intc_domain_ops, chip);
if (!intc_domain) {
pr_err("unable to add IRQ domain\n");
return -ENXIO;
@ -132,7 +176,11 @@ static int __init riscv_intc_init_common(struct fwnode_handle *fn)
riscv_set_intc_hwnode_fn(riscv_intc_hwnode);
pr_info("%d local interrupts mapped\n", BITS_PER_LONG);
pr_info("%d local interrupts mapped\n", riscv_intc_nr_irqs);
if (riscv_intc_custom_nr_irqs) {
pr_info("%d custom local interrupts mapped\n",
riscv_intc_custom_nr_irqs);
}
return 0;
}
@ -140,8 +188,9 @@ static int __init riscv_intc_init_common(struct fwnode_handle *fn)
static int __init riscv_intc_init(struct device_node *node,
struct device_node *parent)
{
int rc;
struct irq_chip *chip = &riscv_intc_chip;
unsigned long hartid;
int rc;
rc = riscv_of_parent_hartid(node, &hartid);
if (rc < 0) {
@ -166,18 +215,26 @@ static int __init riscv_intc_init(struct device_node *node,
return 0;
}
return riscv_intc_init_common(of_node_to_fwnode(node));
if (of_device_is_compatible(node, "andestech,cpu-intc")) {
riscv_intc_custom_base = ANDES_SLI_CAUSE_BASE;
riscv_intc_custom_nr_irqs = ANDES_RV_IRQ_LAST;
chip = &andes_intc_chip;
}
return riscv_intc_init_common(of_node_to_fwnode(node), chip);
}
IRQCHIP_DECLARE(riscv, "riscv,cpu-intc", riscv_intc_init);
IRQCHIP_DECLARE(andes, "andestech,cpu-intc", riscv_intc_init);
#ifdef CONFIG_ACPI
static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header,
const unsigned long end)
{
struct fwnode_handle *fn;
struct acpi_madt_rintc *rintc;
struct fwnode_handle *fn;
int rc;
rintc = (struct acpi_madt_rintc *)header;
@ -196,7 +253,11 @@ static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header,
return -ENOMEM;
}
return riscv_intc_init_common(fn);
rc = riscv_intc_init_common(fn, &riscv_intc_chip);
if (rc)
irq_domain_free_fwnode(fn);
return rc;
}
IRQCHIP_ACPI_DECLARE(riscv_intc, ACPI_MADT_TYPE_RINTC, NULL,


@ -69,8 +69,10 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id
aux_bus->aux_device_wrapper[1] = kzalloc(sizeof(*aux_bus->aux_device_wrapper[1]),
GFP_KERNEL);
if (!aux_bus->aux_device_wrapper[1])
return -ENOMEM;
if (!aux_bus->aux_device_wrapper[1]) {
retval = -ENOMEM;
goto err_aux_dev_add_0;
}
retval = ida_alloc(&gp_client_ida, GFP_KERNEL);
if (retval < 0)
@ -111,6 +113,7 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id
err_aux_dev_add_1:
auxiliary_device_uninit(&aux_bus->aux_device_wrapper[1]->aux_dev);
goto err_aux_dev_add_0;
err_aux_dev_init_1:
ida_free(&gp_client_ida, aux_bus->aux_device_wrapper[1]->aux_dev.id);
@ -120,6 +123,7 @@ err_ida_alloc_1:
err_aux_dev_add_0:
auxiliary_device_uninit(&aux_bus->aux_device_wrapper[0]->aux_dev);
goto err_ret;
err_aux_dev_init_0:
ida_free(&gp_client_ida, aux_bus->aux_device_wrapper[0]->aux_dev.id);
@ -127,6 +131,7 @@ err_aux_dev_init_0:
err_ida_alloc_0:
kfree(aux_bus->aux_device_wrapper[0]);
err_ret:
return retval;
}


@ -400,8 +400,10 @@ static int mei_me_pci_resume(struct device *device)
}
err = mei_restart(dev);
if (err)
if (err) {
free_irq(pdev->irq, dev);
return err;
}
/* Start timer if stopped in suspend */
schedule_delayed_work(&dev->timer_work, HZ);


@ -9,6 +9,7 @@
#include <linux/vmw_vmci_api.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rculist.h>
@ -86,9 +87,12 @@ static void event_deliver(struct vmci_event_msg *event_msg)
{
struct vmci_subscription *cur;
struct list_head *subscriber_list;
u32 sanitized_event, max_vmci_event;
rcu_read_lock();
subscriber_list = &subscriber_array[event_msg->event_data.event];
max_vmci_event = ARRAY_SIZE(subscriber_array);
sanitized_event = array_index_nospec(event_msg->event_data.event, max_vmci_event);
subscriber_list = &subscriber_array[sanitized_event];
list_for_each_entry_rcu(cur, subscriber_list, node) {
cur->callback(cur->id, &event_msg->event_data,
cur->callback_data);
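
array_index_nospec() (linux/nospec.h) clamps the index to [0, size) in a way the CPU cannot speculate past, closing the Spectre-v1 window between the bounds check and the table lookup above. A simplified userspace sketch of the masking idea behind it (the in-kernel implementation is per-architecture and differs from this):

	#include <stddef.h>
	#include <stdio.h>

	static size_t index_nospec(size_t idx, size_t size)
	{
		/* all-ones when idx < size, zero otherwise, without a branch */
		size_t mask = ~(size_t)0 + (idx >= size);

		return idx & mask;
	}

	int main(void)
	{
		printf("%zu %zu\n", index_nospec(3, 8), index_nospec(9, 8));	/* 3 0 */
		return 0;
	}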


@ -431,8 +431,11 @@ qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int p
init_data.devname_mandatory = true;
init_data.devicename = kasprintf(GFP_KERNEL, "%s:0%d", ds->slave_mii_bus->id,
port_num);
if (!init_data.devicename)
if (!init_data.devicename) {
fwnode_handle_put(led);
fwnode_handle_put(leds);
return -ENOMEM;
}
ret = devm_led_classdev_register_ext(priv->dev, &port_led->cdev, &init_data);
if (ret)
@ -441,6 +444,7 @@ qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int p
kfree(init_data.devicename);
}
fwnode_handle_put(leds);
return 0;
}
@ -471,9 +475,13 @@ qca8k_setup_led_ctrl(struct qca8k_priv *priv)
* the correct port for LED setup.
*/
ret = qca8k_parse_port_leds(priv, port, qca8k_port_to_phy(port_num));
if (ret)
if (ret) {
fwnode_handle_put(port);
fwnode_handle_put(ports);
return ret;
}
}
fwnode_handle_put(ports);
return 0;
}


@ -678,7 +678,7 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
req_type);
else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE)
hwrm_err(bp, ctx, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
req_type, token->seq_id, rc);
req_type, le16_to_cpu(ctx->req->seq_id), rc);
rc = __hwrm_to_stderr(rc);
exit:
if (token)


@ -272,13 +272,12 @@ lio_vf_rep_copy_packet(struct octeon_device *oct,
pg_info->page_offset;
memcpy(skb->data, va, MIN_SKB_SIZE);
skb_put(skb, MIN_SKB_SIZE);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
pg_info->page,
pg_info->page_offset + MIN_SKB_SIZE,
len - MIN_SKB_SIZE,
LIO_RXBUFFER_SZ);
}
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
pg_info->page,
pg_info->page_offset + MIN_SKB_SIZE,
len - MIN_SKB_SIZE,
LIO_RXBUFFER_SZ);
} else {
struct octeon_skb_page_info *pg_info =
((struct octeon_skb_page_info *)(skb->cb));


@ -506,11 +506,13 @@ static void gve_rx_skb_hash(struct sk_buff *skb,
skb_set_hash(skb, le32_to_cpu(compl_desc->hash), hash_type);
}
static void gve_rx_free_skb(struct gve_rx_ring *rx)
static void gve_rx_free_skb(struct napi_struct *napi, struct gve_rx_ring *rx)
{
if (!rx->ctx.skb_head)
return;
if (rx->ctx.skb_head == napi->skb)
napi->skb = NULL;
dev_kfree_skb_any(rx->ctx.skb_head);
rx->ctx.skb_head = NULL;
rx->ctx.skb_tail = NULL;
@ -783,7 +785,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
err = gve_rx_dqo(napi, rx, compl_desc, rx->q_num);
if (err < 0) {
gve_rx_free_skb(rx);
gve_rx_free_skb(napi, rx);
u64_stats_update_begin(&rx->statss);
if (err == -ENOMEM)
rx->rx_skb_alloc_fail++;
@ -826,7 +828,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
/* gve_rx_complete_skb() will consume skb if successful */
if (gve_rx_complete_skb(rx, napi, compl_desc, feat) != 0) {
gve_rx_free_skb(rx);
gve_rx_free_skb(napi, rx);
u64_stats_update_begin(&rx->statss);
rx->rx_desc_err_dropped_pkt++;
u64_stats_update_end(&rx->statss);


@ -501,28 +501,18 @@ static int gve_prep_tso(struct sk_buff *skb)
if (unlikely(skb_shinfo(skb)->gso_size < GVE_TX_MIN_TSO_MSS_DQO))
return -1;
if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
return -EINVAL;
/* Needed because we will modify header. */
err = skb_cow_head(skb, 0);
if (err < 0)
return err;
tcp = tcp_hdr(skb);
/* Remove payload length from checksum. */
paylen = skb->len - skb_transport_offset(skb);
switch (skb_shinfo(skb)->gso_type) {
case SKB_GSO_TCPV4:
case SKB_GSO_TCPV6:
csum_replace_by_diff(&tcp->check,
(__force __wsum)htonl(paylen));
/* Compute length of segmentation header. */
header_len = skb_tcp_all_headers(skb);
break;
default:
return -EINVAL;
}
csum_replace_by_diff(&tcp->check, (__force __wsum)htonl(paylen));
header_len = skb_tcp_all_headers(skb);
if (unlikely(header_len > GVE_TX_MAX_HDR_SIZE_DQO))
return -EINVAL;


@ -3612,6 +3612,9 @@ static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
ret = hns3_alloc_and_attach_buffer(ring, i);
if (ret)
goto out_buffer_fail;
if (!(i % HNS3_RESCHED_BD_NUM))
cond_resched();
}
return 0;
@ -5190,6 +5193,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
}
u64_stats_init(&priv->ring[i].syncp);
cond_resched();
}
return 0;


@ -220,6 +220,7 @@ enum hns3_nic_state {
#define HNS3_CQ_MODE_CQE 0U
#define HNS3_FD_QB_FORCE_CNT_MAX 20
#define HNS3_RESCHED_BD_NUM 1024
enum hns3_pkt_l2t_type {
HNS3_L2_TYPE_UNICAST,


@ -3089,9 +3089,7 @@ static void hclge_push_link_status(struct hclge_dev *hdev)
static void hclge_update_link_status(struct hclge_dev *hdev)
{
struct hnae3_handle *rhandle = &hdev->vport[0].roce;
struct hnae3_handle *handle = &hdev->vport[0].nic;
struct hnae3_client *rclient = hdev->roce_client;
struct hnae3_client *client = hdev->nic_client;
int state;
int ret;
@ -3115,8 +3113,15 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
client->ops->link_status_change(handle, state);
hclge_config_mac_tnl_int(hdev, state);
if (rclient && rclient->ops->link_status_change)
rclient->ops->link_status_change(rhandle, state);
if (test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state)) {
struct hnae3_handle *rhandle = &hdev->vport[0].roce;
struct hnae3_client *rclient = hdev->roce_client;
if (rclient && rclient->ops->link_status_change)
rclient->ops->link_status_change(rhandle,
state);
}
hclge_push_link_status(hdev);
}
@ -11798,6 +11803,12 @@ clear_roce:
return ret;
}
static bool hclge_uninit_need_wait(struct hclge_dev *hdev)
{
return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
}
static void hclge_uninit_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
@ -11806,7 +11817,7 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
if (hdev->roce_client) {
clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
while (hclge_uninit_need_wait(hdev))
msleep(HCLGE_WAIT_RESET_DONE);
hdev->roce_client->ops->uninit_instance(&vport->roce, 0);


@ -407,7 +407,6 @@ struct ice_vsi {
struct ice_tc_cfg tc_cfg;
struct bpf_prog *xdp_prog;
struct ice_tx_ring **xdp_rings; /* XDP ring array */
unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */
u16 num_xdp_txq; /* Used XDP queues */
u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
@ -714,6 +713,25 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
ring->flags |= ICE_TX_FLAGS_RING_XDP;
}
/**
* ice_get_xp_from_qid - get ZC XSK buffer pool bound to a queue ID
* @vsi: pointer to VSI
* @qid: index of a queue to look at XSK buff pool presence
*
* Return: A pointer to xsk_buff_pool structure if there is a buffer pool
* attached and configured as zero-copy, NULL otherwise.
*/
static inline struct xsk_buff_pool *ice_get_xp_from_qid(struct ice_vsi *vsi,
u16 qid)
{
struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);
if (!ice_is_xdp_ena_vsi(vsi))
return NULL;
return (pool && pool->dev) ? pool : NULL;
}
/**
* ice_xsk_pool - get XSK buffer pool bound to a ring
* @ring: Rx ring to use
@ -726,10 +744,7 @@ static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
struct ice_vsi *vsi = ring->vsi;
u16 qid = ring->q_index;
if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
return NULL;
return xsk_get_pool_from_qid(vsi->netdev, qid);
return ice_get_xp_from_qid(vsi, qid);
}
/**
@ -754,12 +769,7 @@ static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
if (!ring)
return;
if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) {
ring->xsk_pool = NULL;
return;
}
ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid);
ring->xsk_pool = ice_get_xp_from_qid(vsi, qid);
}
/**
@ -882,9 +892,16 @@ int ice_down(struct ice_vsi *vsi);
int ice_down_up(struct ice_vsi *vsi);
int ice_vsi_cfg_lan(struct ice_vsi *vsi);
struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
enum ice_xdp_cfg {
ICE_XDP_CFG_FULL, /* Fully apply new config in .ndo_bpf() */
ICE_XDP_CFG_PART, /* Save/use part of config in VSI rebuild */
};
int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
int ice_destroy_xdp_rings(struct ice_vsi *vsi);
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
enum ice_xdp_cfg cfg_type);
int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type);
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);


@ -117,14 +117,8 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
if (!vsi->q_vectors)
goto err_vectors;
vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL);
if (!vsi->af_xdp_zc_qps)
goto err_zc_qps;
return 0;
err_zc_qps:
devm_kfree(dev, vsi->q_vectors);
err_vectors:
devm_kfree(dev, vsi->rxq_map);
err_rxq_map:
@ -321,8 +315,6 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
dev = ice_pf_to_dev(pf);
bitmap_free(vsi->af_xdp_zc_qps);
vsi->af_xdp_zc_qps = NULL;
/* free the ring and vector containers */
devm_kfree(dev, vsi->q_vectors);
vsi->q_vectors = NULL;
@ -2470,7 +2462,8 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
ret = ice_vsi_determine_xdp_res(vsi);
if (ret)
goto unroll_vector_base;
ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog,
ICE_XDP_CFG_PART);
if (ret)
goto unroll_vector_base;
}
@ -2621,7 +2614,7 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
/* return value check can be skipped here, it always returns
* 0 if reset is in progress
*/
ice_destroy_xdp_rings(vsi);
ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_PART);
ice_vsi_clear_rings(vsi);
ice_vsi_free_q_vectors(vsi);


@ -2657,10 +2657,12 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
* ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
* @vsi: VSI to bring up Tx rings used by XDP
* @prog: bpf program that will be assigned to VSI
* @cfg_type: create from scratch or restore the existing configuration
*
* Return 0 on success and negative value on error
*/
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
enum ice_xdp_cfg cfg_type)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
int xdp_rings_rem = vsi->num_xdp_txq;
@ -2736,7 +2738,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
* taken into account at the end of ice_vsi_rebuild, where
* ice_cfg_vsi_lan is being called
*/
if (ice_is_reset_in_progress(pf->state))
if (cfg_type == ICE_XDP_CFG_PART)
return 0;
/* tell the Tx scheduler that right now we have
@ -2788,22 +2790,21 @@ err_map_xdp:
/**
* ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
* @vsi: VSI to remove XDP rings
* @cfg_type: disable XDP permanently or allow it to be restored later
*
* Detach XDP rings from irq vectors, clean up the PF bitmap and free
* resources
*/
int ice_destroy_xdp_rings(struct ice_vsi *vsi)
int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_pf *pf = vsi->back;
int i, v_idx;
/* q_vectors are freed in reset path so there's no point in detaching
* rings; in case of rebuild being triggered not from reset bits
* in pf->state won't be set, so additionally check first q_vector
* against NULL
* rings
*/
if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
if (cfg_type == ICE_XDP_CFG_PART)
goto free_qmap;
ice_for_each_q_vector(vsi, v_idx) {
@ -2844,7 +2845,7 @@ free_qmap:
if (static_key_enabled(&ice_xdp_locking_key))
static_branch_dec(&ice_xdp_locking_key);
if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
if (cfg_type == ICE_XDP_CFG_PART)
return 0;
ice_vsi_assign_bpf_prog(vsi, NULL);
@ -2955,7 +2956,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
if (xdp_ring_err) {
NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
} else {
xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
ICE_XDP_CFG_FULL);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
}
@ -2966,7 +2968,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
xdp_features_clear_redirect_target(vsi->netdev);
xdp_ring_err = ice_destroy_xdp_rings(vsi);
xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
/* reallocate Rx queues that were used for zero-copy */


@ -441,8 +441,7 @@ int
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
u16 module_type)
{
u16 pfa_len, pfa_ptr;
u16 next_tlv;
u16 pfa_len, pfa_ptr, next_tlv, max_tlv;
int status;
status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
@ -455,11 +454,23 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
return status;
}
/* The Preserved Fields Area contains a sequence of Type-Length-Value
* structures which define its contents. The PFA length includes all
* of the TLVs, plus the initial length word itself, *and* one final
* word at the end after all of the TLVs.
*/
if (check_add_overflow(pfa_ptr, pfa_len - 1, &max_tlv)) {
dev_warn(ice_hw_to_dev(hw), "PFA starts at offset %u. PFA length of %u caused 16-bit arithmetic overflow.\n",
pfa_ptr, pfa_len);
return -EINVAL;
}
/* Starting with first TLV after PFA length, iterate through the list
* of TLVs to find the requested one.
*/
next_tlv = pfa_ptr + 1;
while (next_tlv < pfa_ptr + pfa_len) {
while (next_tlv < max_tlv) {
u16 tlv_sub_module_type;
u16 tlv_len;
@ -483,10 +494,13 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
}
return -EINVAL;
}
/* Check next TLV, i.e. current TLV pointer + length + 2 words
* (for current TLV's type and length)
*/
next_tlv = next_tlv + tlv_len + 2;
if (check_add_overflow(next_tlv, 2, &next_tlv) ||
check_add_overflow(next_tlv, tlv_len, &next_tlv)) {
dev_warn(ice_hw_to_dev(hw), "TLV of type %u and length 0x%04x caused 16-bit arithmetic overflow. The PFA starts at 0x%04x and has length of 0x%04x\n",
tlv_sub_module_type, tlv_len, pfa_ptr, pfa_len);
return -EINVAL;
}
}
/* Module does not exist */
return -ENOENT;
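
The new check_add_overflow() calls guard 16-bit word offsets. For example (assumed values): with pfa_ptr = 0xFF00 and pfa_len = 0x0200, the old "pfa_ptr + pfa_len" arithmetic wrapped to a small value and the TLV walk scanned the wrong region of the NVM. A standalone demonstration using __builtin_add_overflow, which is essentially what the kernel macro wraps on gcc/clang:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t pfa_ptr = 0xFF00, pfa_len = 0x0200, max_tlv;

		if (__builtin_add_overflow(pfa_ptr, (uint16_t)(pfa_len - 1), &max_tlv))
			printf("overflow: PFA end not representable in u16\n");
		else
			printf("max_tlv = 0x%04x\n", max_tlv);
		return 0;
	}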


@ -289,7 +289,6 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
if (!pool)
return -EINVAL;
clear_bit(qid, vsi->af_xdp_zc_qps);
xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);
return 0;
@ -320,8 +319,6 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
if (err)
return err;
set_bit(qid, vsi->af_xdp_zc_qps);
return 0;
}
@ -369,11 +366,13 @@ ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
{
struct ice_rx_ring *rx_ring;
unsigned long q;
uint i;
ice_for_each_rxq(vsi, i) {
rx_ring = vsi->rx_rings[i];
if (!rx_ring->xsk_pool)
continue;
for_each_set_bit(q, vsi->af_xdp_zc_qps,
max_t(int, vsi->alloc_txq, vsi->alloc_rxq)) {
rx_ring = vsi->rx_rings[q];
if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
return -ENOMEM;
}


@ -2506,7 +2506,17 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
* - when available free entries are less.
* Lower priority ones out of available free entries are always
* chosen when 'high vs low' question arises.
*
* For a VF, the base MCAM match rule is set by its PF. And all the
* further MCAM rules installed by VF on its own are
* concatenated with the base rule set by its PF. Hence PF entries
* should be at lower priority compared to VF entries. Otherwise
* base rule is hit always and rules installed by VF will be of
* no use. Hence if the request is from PF then allocate low
* priority entries.
*/
if (!(pcifunc & RVU_PFVF_FUNC_MASK))
goto lprio_alloc;
/* Get the search range for priority allocation request */
if (req->priority) {
@ -2515,17 +2525,6 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
goto alloc;
}
/* For a VF, the base MCAM match rule is set by its PF. And all the
* further MCAM rules installed by VF on its own are
* concatenated with the base rule set by its PF. Hence PF entries
* should be at lower priority compared to VF entries. Otherwise
* base rule is hit always and rules installed by VF will be of
* no use. Hence if the request is from PF and NOT a priority
* allocation request then allocate low priority entries.
*/
if (!(pcifunc & RVU_PFVF_FUNC_MASK))
goto lprio_alloc;
/* Find out the search range for non-priority allocation request
*
* Get MCAM free entry count in middle zone.
@ -2555,6 +2554,18 @@ lprio_alloc:
reverse = true;
start = 0;
end = mcam->bmap_entries;
/* Ensure PF requests are always at bottom and if PF requests
* for higher/lower priority entry wrt reference entry then
* honour that criteria and start search for entries from bottom
* and not in mid zone.
*/
if (!(pcifunc & RVU_PFVF_FUNC_MASK) &&
req->priority == NPC_MCAM_HIGHER_PRIO)
end = req->ref_entry;
if (!(pcifunc & RVU_PFVF_FUNC_MASK) &&
req->priority == NPC_MCAM_LOWER_PRIO)
start = req->ref_entry;
}
alloc:


@ -4704,7 +4704,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
/* Verify if UDP port is being offloaded by HW */
if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
return features;
return vxlan_features_check(skb, features);
#if IS_ENABLED(CONFIG_GENEVE)
/* Support Geneve offload for default UDP port */
@ -4730,7 +4730,6 @@ netdev_features_t mlx5e_features_check(struct sk_buff *skb,
struct mlx5e_priv *priv = netdev_priv(netdev);
features = vlan_features_check(skb, features);
features = vxlan_features_check(skb, features);
/* Validate if the tunneled packet is being offloaded by HW */
if (skb->encapsulation &&


@ -373,6 +373,10 @@ int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
do {
if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
break;
if (pci_channel_offline(dev->pdev)) {
mlx5_core_err(dev, "PCI channel offline, stop waiting for NIC IFC\n");
return -EACCES;
}
cond_resched();
} while (!time_after(jiffies, end));


@ -248,6 +248,10 @@ recover_from_sw_reset:
do {
if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
break;
if (pci_channel_offline(dev->pdev)) {
mlx5_core_err(dev, "PCI channel offline, stop waiting for NIC IFC\n");
goto unlock;
}
msleep(20);
} while (!time_after(jiffies, end));
@ -317,6 +321,10 @@ int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev)
mlx5_core_warn(dev, "device is being removed, stop waiting for PCI\n");
return -ENODEV;
}
if (pci_channel_offline(dev->pdev)) {
mlx5_core_err(dev, "PCI channel offline, stop waiting for PCI\n");
return -EACCES;
}
msleep(100);
}
return 0;


@ -88,9 +88,13 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
&dest, 1);
if (IS_ERR(lag_definer->rules[idx])) {
err = PTR_ERR(lag_definer->rules[idx]);
while (i--)
while (j--)
do {
while (j--) {
idx = i * ldev->buckets + j;
mlx5_del_flow_rules(lag_definer->rules[idx]);
}
j = ldev->buckets;
} while (i--);
goto destroy_fg;
}
}
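
The reworked unwind matters because the failing rule lives at idx = i * ldev->buckets + j: the old cleanup did not re-derive idx for every (i, j) pair, so rules created in earlier, fully-populated rows leaked. The do/while finishes the partial row, resets j, then walks each previous full row. A minimal model of the pattern (undo() is a stand-in for mlx5_del_flow_rules()):

	static void undo(int idx) { (void)idx; }	/* stand-in */

	static void unwind(int i, int j, int buckets)
	{
		/* (i, j) index the entry that failed to allocate */
		do {
			while (j--)
				undo(i * buckets + j);
			j = buckets;
		} while (i--);
	}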


@ -74,6 +74,10 @@ int mlx5_vsc_gw_lock(struct mlx5_core_dev *dev)
ret = -EBUSY;
goto pci_unlock;
}
if (pci_channel_offline(dev->pdev)) {
ret = -EACCES;
goto pci_unlock;
}
/* Check if semaphore is already locked */
ret = vsc_read(dev, VSC_SEMAPHORE_OFFSET, &lock_val);


@ -1287,6 +1287,9 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
if (!err)
mlx5_function_disable(dev, boot);
else
mlx5_stop_health_poll(dev, boot);
return err;
}


@ -296,10 +296,8 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
if (ret)
return ret;
if (qcq->napi.poll)
napi_enable(&qcq->napi);
if (qcq->flags & IONIC_QCQ_F_INTR) {
napi_enable(&qcq->napi);
irq_set_affinity_hint(qcq->intr.vector,
&qcq->intr.affinity_mask);
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,


@ -93,6 +93,7 @@ struct ethqos_emac_driver_data {
bool has_emac_ge_3;
const char *link_clk_name;
bool has_integrated_pcs;
u32 dma_addr_width;
struct dwmac4_addrs dwmac4_addrs;
};
@ -272,6 +273,7 @@ static const struct ethqos_emac_driver_data emac_v4_0_0_data = {
.has_emac_ge_3 = true,
.link_clk_name = "phyaux",
.has_integrated_pcs = true,
.dma_addr_width = 36,
.dwmac4_addrs = {
.dma_chan = 0x00008100,
.dma_chan_offset = 0x1000,
@ -816,6 +818,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
plat_dat->flags |= STMMAC_FLAG_RX_CLK_RUNS_IN_LPI;
if (data->has_integrated_pcs)
plat_dat->flags |= STMMAC_FLAG_HAS_INTEGRATED_PCS;
if (data->dma_addr_width)
plat_dat->host_dma_width = data->dma_addr_width;
if (ethqos->serdes_phy) {
plat_dat->serdes_powerup = qcom_ethqos_serdes_powerup;


@ -343,10 +343,11 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
struct tc_cbs_qopt_offload *qopt)
{
u32 tx_queues_count = priv->plat->tx_queues_to_use;
s64 port_transmit_rate_kbps;
u32 queue = qopt->queue;
u32 ptr, speed_div;
u32 mode_to_use;
u64 value;
u32 ptr;
int ret;
/* Queue 0 is not AVB capable */
@ -355,30 +356,26 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
if (!priv->dma_cap.av)
return -EOPNOTSUPP;
port_transmit_rate_kbps = qopt->idleslope - qopt->sendslope;
/* Port Transmit Rate and Speed Divider */
switch (priv->speed) {
switch (div_s64(port_transmit_rate_kbps, 1000)) {
case SPEED_10000:
ptr = 32;
speed_div = 10000000;
break;
case SPEED_5000:
ptr = 32;
speed_div = 5000000;
break;
case SPEED_2500:
ptr = 8;
speed_div = 2500000;
break;
case SPEED_1000:
ptr = 8;
speed_div = 1000000;
break;
case SPEED_100:
ptr = 4;
speed_div = 100000;
break;
default:
return -EOPNOTSUPP;
netdev_err(priv->dev,
"Invalid portTransmitRate %lld (idleSlope - sendSlope)\n",
port_transmit_rate_kbps);
return -EINVAL;
}
mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
@ -398,10 +395,10 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
}
/* Final adjustments for HW */
value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div);
value = div_s64(qopt->idleslope * 1024ll * ptr, port_transmit_rate_kbps);
priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);
value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div);
value = div_s64(-qopt->sendslope * 1024ll * ptr, port_transmit_rate_kbps);
priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0);
value = qopt->hicredit * 1024ll * 8;
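
For scale (assumed numbers): reserving 20% of a 1 Gb/s link gives idleslope = 200,000 kb/s and sendslope = -800,000 kb/s, so port_transmit_rate_kbps = 1,000,000, the switch above matches SPEED_1000 (ptr = 8), and the hardware slope words come out as below. A standalone recomputation:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int64_t idleslope = 200000;	/* kb/s, assumed */
		int64_t sendslope = -800000;	/* kb/s, assumed */
		int64_t rate = idleslope - sendslope;	/* 1,000,000 kb/s */
		int64_t ptr = 8;	/* per the SPEED_1000 case above */

		printf("idle_slope = %lld\n",
		       (long long)(idleslope * 1024 * ptr / rate));	/* 1638 */
		printf("send_slope = %lld\n",
		       (long long)(-sendslope * 1024 * ptr / rate));	/* 6553 */
		return 0;
	}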


@ -915,6 +915,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct geneve_dev *geneve,
const struct ip_tunnel_info *info)
{
bool inner_proto_inherit = geneve->cfg.inner_proto_inherit;
bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
struct geneve_sock *gs4 = rcu_dereference(geneve->sock4);
const struct ip_tunnel_key *key = &info->key;
@ -926,7 +927,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
if (!skb_vlan_inet_prepare(skb))
if (!skb_vlan_inet_prepare(skb, inner_proto_inherit))
return -EINVAL;
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
@ -999,7 +1000,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
}
err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr),
geneve->cfg.inner_proto_inherit);
inner_proto_inherit);
if (unlikely(err))
return err;
@ -1015,6 +1016,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct geneve_dev *geneve,
const struct ip_tunnel_info *info)
{
bool inner_proto_inherit = geneve->cfg.inner_proto_inherit;
bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
struct geneve_sock *gs6 = rcu_dereference(geneve->sock6);
const struct ip_tunnel_key *key = &info->key;
@ -1024,7 +1026,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
if (!skb_vlan_inet_prepare(skb))
if (!skb_vlan_inet_prepare(skb, inner_proto_inherit))
return -EINVAL;
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
@ -1079,7 +1081,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
ttl = ttl ? : ip6_dst_hoplimit(dst);
}
err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr),
geneve->cfg.inner_proto_inherit);
inner_proto_inherit);
if (unlikely(err))
return err;


@ -770,6 +770,17 @@ static int ksz8061_config_init(struct phy_device *phydev)
{
int ret;
/* Chip can be powered down by the bootstrap code. */
ret = phy_read(phydev, MII_BMCR);
if (ret < 0)
return ret;
if (ret & BMCR_PDOWN) {
ret = phy_write(phydev, MII_BMCR, ret & ~BMCR_PDOWN);
if (ret < 0)
return ret;
usleep_range(1000, 2000);
}
ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A);
if (ret)
return ret;
@ -1821,7 +1832,7 @@ static const struct ksz9477_errata_write ksz9477_errata_writes[] = {
{0x1c, 0x20, 0xeeee},
};
static int ksz9477_config_init(struct phy_device *phydev)
static int ksz9477_phy_errata(struct phy_device *phydev)
{
int err;
int i;
@ -1849,16 +1860,30 @@ static int ksz9477_config_init(struct phy_device *phydev)
return err;
}
err = genphy_restart_aneg(phydev);
if (err)
return err;
return err;
}
static int ksz9477_config_init(struct phy_device *phydev)
{
int err;
/* Only KSZ9897 family of switches needs this fix. */
if ((phydev->phy_id & 0xf) == 1) {
err = ksz9477_phy_errata(phydev);
if (err)
return err;
}
/* According to KSZ9477 Errata DS80000754C (Module 4) all EEE modes
* in this switch shall be regarded as broken.
*/
if (phydev->dev_flags & MICREL_NO_EEE)
phydev->eee_broken_modes = -1;
err = genphy_restart_aneg(phydev);
if (err)
return err;
return kszphy_config_init(phydev);
}
@ -1967,6 +1992,71 @@ static int kszphy_resume(struct phy_device *phydev)
return 0;
}
static int ksz9477_resume(struct phy_device *phydev)
{
int ret;
/* No need to initialize registers if not powered down. */
ret = phy_read(phydev, MII_BMCR);
if (ret < 0)
return ret;
if (!(ret & BMCR_PDOWN))
return 0;
genphy_resume(phydev);
/* After switching from power-down to normal mode, an internal global
* reset is automatically generated. Wait a minimum of 1 ms before
* read/write access to the PHY registers.
*/
usleep_range(1000, 2000);
/* Only KSZ9897 family of switches needs this fix. */
if ((phydev->phy_id & 0xf) == 1) {
ret = ksz9477_phy_errata(phydev);
if (ret)
return ret;
}
/* Enable PHY Interrupts */
if (phy_interrupt_is_valid(phydev)) {
phydev->interrupts = PHY_INTERRUPT_ENABLED;
if (phydev->drv->config_intr)
phydev->drv->config_intr(phydev);
}
return 0;
}
static int ksz8061_resume(struct phy_device *phydev)
{
int ret;
/* This function can be called twice when the Ethernet device is on. */
ret = phy_read(phydev, MII_BMCR);
if (ret < 0)
return ret;
if (!(ret & BMCR_PDOWN))
return 0;
genphy_resume(phydev);
usleep_range(1000, 2000);
/* Re-program the value after chip is reset. */
ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A);
if (ret)
return ret;
/* Enable PHY Interrupts */
if (phy_interrupt_is_valid(phydev)) {
phydev->interrupts = PHY_INTERRUPT_ENABLED;
if (phydev->drv->config_intr)
phydev->drv->config_intr(phydev);
}
return 0;
}
static int kszphy_probe(struct phy_device *phydev)
{
const struct kszphy_type *type = phydev->drv->driver_data;
@ -4762,7 +4852,7 @@ static struct phy_driver ksphy_driver[] = {
.config_intr = kszphy_config_intr,
.handle_interrupt = kszphy_handle_interrupt,
.suspend = kszphy_suspend,
.resume = kszphy_resume,
.resume = ksz8061_resume,
}, {
.phy_id = PHY_ID_KSZ9021,
.phy_id_mask = 0x000ffffe,
@ -4916,7 +5006,7 @@ static struct phy_driver ksphy_driver[] = {
.config_intr = kszphy_config_intr,
.handle_interrupt = kszphy_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
.resume = ksz9477_resume,
.get_features = ksz9477_get_features,
} };


@ -2394,8 +2394,7 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event)
/* Handle remove event globally, it resets this state machine */
if (event == SFP_E_REMOVE) {
if (sfp->sm_mod_state > SFP_MOD_PROBE)
sfp_sm_mod_remove(sfp);
sfp_sm_mod_remove(sfp);
sfp_sm_mod_next(sfp, SFP_MOD_EMPTY, 0);
return;
}


@ -2034,8 +2034,8 @@ vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
rq->data_ring.base,
rq->data_ring.basePA);
rq->data_ring.base = NULL;
rq->data_ring.desc_size = 0;
}
rq->data_ring.desc_size = 0;
}
}


@ -1446,6 +1446,10 @@ static bool vxlan_snoop(struct net_device *dev,
struct vxlan_fdb *f;
u32 ifindex = 0;
/* Ignore packets from invalid src-address */
if (!is_valid_ether_addr(src_mac))
return true;
#if IS_ENABLED(CONFIG_IPV6)
if (src_ip->sa.sa_family == AF_INET6 &&
(ipv6_addr_type(&src_ip->sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL))
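
is_valid_ether_addr() rejects exactly two classes of source MAC that a well-formed frame can never carry: the all-zeros address and any group (multicast/broadcast) address, i.e. one with the I/G bit set in the first octet. A standalone equivalent of the test:

	#include <stdbool.h>
	#include <stdint.h>

	static bool valid_ether_addr(const uint8_t a[6])
	{
		bool zero = !(a[0] | a[1] | a[2] | a[3] | a[4] | a[5]);
		bool group = a[0] & 0x01;	/* I/G bit: multicast/broadcast */

		return !zero && !group;
	}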

Some files were not shown because too many files have changed in this diff.