Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/ethernet/broadcom/genet/bcmmii.c
	drivers/net/hyperv/netvsc.c
	kernel/bpf/hashtab.c

Almost entirely overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 16ae1f2236
MAINTAINERS | 18
@@ -3223,7 +3223,6 @@ F: drivers/platform/chrome/
 
 CISCO VIC ETHERNET NIC DRIVER
 M:	Christian Benvenuti <benve@cisco.com>
-M:	Sujith Sankar <ssujith@cisco.com>
 M:	Govindarajulu Varadarajan <_govind@gmx.com>
 M:	Neel Patel <neepatel@cisco.com>
 S:	Supported
@@ -7781,13 +7780,6 @@ F: include/net/mac80211.h
 F:	net/mac80211/
 F:	drivers/net/wireless/mac80211_hwsim.[ch]
 
-MACVLAN DRIVER
-M:	Patrick McHardy <kaber@trash.net>
-L:	netdev@vger.kernel.org
-S:	Maintained
-F:	drivers/net/macvlan.c
-F:	include/linux/if_macvlan.h
-
 MAILBOX API
 M:	Jassi Brar <jassisinghbrar@gmail.com>
 L:	linux-kernel@vger.kernel.org
@@ -7860,6 +7852,8 @@ F: drivers/net/ethernet/marvell/mvneta.*
 MARVELL MWIFIEX WIRELESS DRIVER
 M:	Amitkumar Karwar <akarwar@marvell.com>
 M:	Nishant Sarmukadam <nishants@marvell.com>
+M:	Ganapathi Bhat <gbhat@marvell.com>
+M:	Xinming Hu <huxm@marvell.com>
 L:	linux-wireless@vger.kernel.org
 S:	Maintained
 F:	drivers/net/wireless/marvell/mwifiex/
@@ -13396,14 +13390,6 @@ W: https://linuxtv.org
 S:	Maintained
 F:	drivers/media/platform/vivid/*
 
-VLAN (802.1Q)
-M:	Patrick McHardy <kaber@trash.net>
-L:	netdev@vger.kernel.org
-S:	Maintained
-F:	drivers/net/macvlan.c
-F:	include/linux/if_*vlan.h
-F:	net/8021q/
-
 VLYNQ BUS
 M:	Florian Fainelli <f.fainelli@gmail.com>
 L:	openwrt-devel@lists.openwrt.org (subscribers-only)
Makefile | 2

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
@@ -411,3 +411,4 @@
 394	common	pkey_mprotect		sys_pkey_mprotect
 395	common	pkey_alloc		sys_pkey_alloc
 396	common	pkey_free		sys_pkey_free
+397	common	statx			sys_statx
@@ -1073,6 +1073,10 @@ config SYSVIPC_COMPAT
 	def_bool y
 	depends on COMPAT && SYSVIPC
 
+config KEYS_COMPAT
+	def_bool y
+	depends on COMPAT && KEYS
+
 endmenu
 
 menu "Power management options"
@@ -251,7 +251,7 @@ static inline bool system_supports_fpsimd(void)
 static inline bool system_uses_ttbr0_pan(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
-		!cpus_have_cap(ARM64_HAS_PAN);
+		!cpus_have_const_cap(ARM64_HAS_PAN);
 }
 
 #endif /* __ASSEMBLY__ */
@@ -30,7 +30,7 @@ int arm_cpuidle_init(unsigned int cpu)
 }
 
 /**
- * cpu_suspend() - function to enter a low-power idle state
+ * arm_cpuidle_suspend() - function to enter a low-power idle state
  * @arg: argument to pass to CPU suspend operations
  *
  * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
@@ -372,12 +372,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
 	return 0;
 }
 
-int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
-				       unsigned long val, void *data)
-{
-	return NOTIFY_DONE;
-}
-
 static void __kprobes kprobe_handler(struct pt_regs *regs)
 {
 	struct kprobe *p, *cur_kprobe;
@@ -162,7 +162,7 @@ void __init kasan_init(void)
 	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
 	vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
-			 pfn_to_nid(virt_to_pfn(_text)));
+			 pfn_to_nid(virt_to_pfn(lm_alias(_text))));
 
 	/*
	 * vmemmap_populate() has populated the shadow region that covers the
@@ -77,7 +77,11 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
 	return val;
 }
 
-#define xchg(ptr, with) \
-	((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), sizeof(*(ptr))))
+#define xchg(ptr, with) \
+	({ \
+		(__typeof__(*(ptr))) __xchg((unsigned long)(with), \
+					    (ptr), \
+					    sizeof(*(ptr))); \
+	})
 
 #endif /* __ASM_OPENRISC_CMPXCHG_H */
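Aside, for illustration only (not part of the merge): the new xchg() above switches to GCC's statement-expression extension, so the macro's result carries the pointee's type without casting the whole old-style expression. A minimal user-space sketch of the same idiom, where __xchg_demo() is an invented stand-in for the real atomic primitive:

/* Sketch of the ({ ... }) statement-expression macro form (GCC/Clang). */
#include <stdio.h>

static unsigned long __xchg_demo(unsigned long with, volatile void *ptr,
				 int size)
{
	/* non-atomic stand-in for the real exchange */
	unsigned long old = *(volatile unsigned long *)ptr;
	*(volatile unsigned long *)ptr = with;
	(void)size;
	return old;
}

#define xchg_demo(ptr, with)						\
	({								\
		(__typeof__(*(ptr))) __xchg_demo((unsigned long)(with),	\
						 (ptr),			\
						 sizeof(*(ptr)));	\
	})

int main(void)
{
	unsigned long v = 1;
	unsigned long old = xchg_demo(&v, 2UL);	/* old keeps v's type */
	printf("old=%lu new=%lu\n", old, v);
	return 0;
}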
@@ -211,7 +211,7 @@ do { \
 	case 1: __get_user_asm(x, ptr, retval, "l.lbz"); break;	\
 	case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break;	\
 	case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break;	\
-	case 8: __get_user_asm2(x, ptr, retval);		\
+	case 8: __get_user_asm2(x, ptr, retval); break;		\
 	default: (x) = __get_user_bad();			\
 	}							\
 } while (0)
@@ -30,6 +30,7 @@
 #include <asm/hardirq.h>
 #include <asm/delay.h>
+#include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 
 #define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name)
@@ -42,6 +43,9 @@ DECLARE_EXPORT(__muldi3);
 DECLARE_EXPORT(__ashrdi3);
 DECLARE_EXPORT(__ashldi3);
 DECLARE_EXPORT(__lshrdi3);
+DECLARE_EXPORT(__ucmpdi2);
 
+EXPORT_SYMBOL(empty_zero_page);
 EXPORT_SYMBOL(__copy_tofrom_user);
+EXPORT_SYMBOL(__clear_user);
 EXPORT_SYMBOL(memset);
@@ -90,6 +90,7 @@ void arch_cpu_idle(void)
 }
 
 void (*pm_power_off) (void) = machine_power_off;
+EXPORT_SYMBOL(pm_power_off);
 
 /*
  * When a process does an "exec", machine state like FPU and debug
@@ -43,28 +43,9 @@ static inline void flush_kernel_dcache_page(struct page *page)
 
 #define flush_kernel_dcache_range(start,size) \
 	flush_kernel_dcache_range_asm((start), (start)+(size));
-/* vmap range flushes and invalidates.  Architecturally, we don't need
- * the invalidate, because the CPU should refuse to speculate once an
- * area has been flushed, so invalidate is left empty */
-static inline void flush_kernel_vmap_range(void *vaddr, int size)
-{
-	unsigned long start = (unsigned long)vaddr;
-
-	flush_kernel_dcache_range_asm(start, start + size);
-}
-static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
-{
-	unsigned long start = (unsigned long)vaddr;
-	void *cursor = vaddr;
-
-	for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
-		struct page *page = vmalloc_to_page(cursor);
-
-		if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
-			flush_kernel_dcache_page(page);
-	}
-	flush_kernel_dcache_range_asm(start, start + size);
-}
+
+void flush_kernel_vmap_range(void *vaddr, int size);
+void invalidate_kernel_vmap_range(void *vaddr, int size);
 
 #define flush_cache_vmap(start, end)		flush_cache_all()
 #define flush_cache_vunmap(start, end)		flush_cache_all()
@@ -32,7 +32,8 @@
  * that put_user is the same as __put_user, etc.
  */
 
-#define access_ok(type, uaddr, size) (1)
+#define access_ok(type, uaddr, size)	\
+	( (uaddr) == (uaddr) )
 
 #define put_user __put_user
 #define get_user __get_user
@@ -362,8 +362,9 @@
 #define __NR_copy_file_range	(__NR_Linux + 346)
 #define __NR_preadv2		(__NR_Linux + 347)
 #define __NR_pwritev2		(__NR_Linux + 348)
+#define __NR_statx		(__NR_Linux + 349)
 
-#define __NR_Linux_syscalls	(__NR_pwritev2 + 1)
+#define __NR_Linux_syscalls	(__NR_statx + 1)
 
 
 #define __IGNORE_select		/* newselect */
@@ -616,3 +616,25 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
 	}
 }
+
+void flush_kernel_vmap_range(void *vaddr, int size)
+{
+	unsigned long start = (unsigned long)vaddr;
+
+	if ((unsigned long)size > parisc_cache_flush_threshold)
+		flush_data_cache();
+	else
+		flush_kernel_dcache_range_asm(start, start + size);
+}
+EXPORT_SYMBOL(flush_kernel_vmap_range);
+
+void invalidate_kernel_vmap_range(void *vaddr, int size)
+{
+	unsigned long start = (unsigned long)vaddr;
+
+	if ((unsigned long)size > parisc_cache_flush_threshold)
+		flush_data_cache();
+	else
+		flush_kernel_dcache_range_asm(start, start + size);
+}
+EXPORT_SYMBOL(invalidate_kernel_vmap_range);
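Aside, for illustration only: both new routines above share one heuristic, namely that past a size threshold it is cheaper to flush the whole data cache than to flush the range line by line. A user-space sketch of that decision, with an invented threshold value and stub flush functions:

/* Threshold-based cache flush decision (illustrative values only). */
#include <stdio.h>

#define CACHE_FLUSH_THRESHOLD (390 * 1024)	/* made-up threshold */

static void flush_whole_cache(void)
{
	puts("flush entire D-cache");
}

static void flush_range(unsigned long start, unsigned long end)
{
	printf("flush range %#lx..%#lx\n", start, end);
}

static void flush_vmap_range_demo(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;

	/* big ranges: whole-cache flush is cheaper than per-line work */
	if ((unsigned long)size > CACHE_FLUSH_THRESHOLD)
		flush_whole_cache();
	else
		flush_range(start, start + size);
}

int main(void)
{
	char buf[64];

	flush_vmap_range_demo(buf, sizeof(buf));	/* small: range flush */
	flush_vmap_range_demo(buf, 1 << 20);		/* large: full flush */
	return 0;
}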
@@ -620,6 +620,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
 			 */
 			*loc = fsel(val, addend);
 			break;
+		case R_PARISC_SECREL32:
+			/* 32-bit section relative address. */
+			*loc = fsel(val, addend);
+			break;
 		case R_PARISC_DPREL21L:
 			/* left 21 bit of relative address */
 			val = lrsel(val - dp, addend);
@@ -807,6 +811,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
 			 */
 			*loc = fsel(val, addend);
 			break;
+		case R_PARISC_SECREL32:
+			/* 32-bit section relative address. */
+			*loc = fsel(val, addend);
+			break;
 		case R_PARISC_FPTR64:
 			/* 64-bit function address */
 			if(in_local(me, (void *)(val + addend))) {
@@ -39,7 +39,7 @@
  *  the PDC INTRIGUE calls.  This is done to eliminate bugs introduced
  *  in various PDC revisions.  The code is much more maintainable
  *  and reliable this way vs having to debug on every version of PDC
- *  on every box.
+ *  on every box.
  */
 
 #include <linux/capability.h>
@@ -195,8 +195,8 @@ static int perf_config(uint32_t *image_ptr);
 static int perf_release(struct inode *inode, struct file *file);
 static int perf_open(struct inode *inode, struct file *file);
 static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos);
-static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
-	loff_t *ppos);
+static ssize_t perf_write(struct file *file, const char __user *buf,
+	size_t count, loff_t *ppos);
 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
 static void perf_start_counters(void);
 static int perf_stop_counters(uint32_t *raddr);
@@ -222,7 +222,7 @@ extern void perf_intrigue_disable_perf_counters (void);
 /*
  * configure:
  *
- * Configure the cpu with a given data image.  First turn off the counters,
+ * Configure the cpu with a given data image.  First turn off the counters,
  * then download the image, then turn the counters back on.
  */
 static int perf_config(uint32_t *image_ptr)
@@ -234,7 +234,7 @@ static int perf_config(uint32_t *image_ptr)
 	error = perf_stop_counters(raddr);
 	if (error != 0) {
 		printk("perf_config: perf_stop_counters = %ld\n", error);
-		return -EINVAL;
+		return -EINVAL;
 	}
 
 printk("Preparing to write image\n");
@@ -242,7 +242,7 @@ printk("Preparing to write image\n");
 	error = perf_write_image((uint64_t *)image_ptr);
 	if (error != 0) {
 		printk("perf_config: DOWNLOAD = %ld\n", error);
-		return -EINVAL;
+		return -EINVAL;
 	}
 
 printk("Preparing to start counters\n");
@@ -254,7 +254,7 @@ printk("Preparing to start counters\n");
 }
 
 /*
- * Open the device and initialize all of its memory.  The device is only
+ * Open the device and initialize all of its memory.  The device is only
  * opened once, but can be "queried" by multiple processes that know its
  * file descriptor.
  */
@@ -298,19 +298,19 @@ static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t
  * called on the processor that the download should happen
  * on.
  */
-static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
-	loff_t *ppos)
+static ssize_t perf_write(struct file *file, const char __user *buf,
+	size_t count, loff_t *ppos)
 {
 	size_t image_size;
 	uint32_t image_type;
 	uint32_t interface_type;
 	uint32_t test;
 
-	if (perf_processor_interface == ONYX_INTF)
+	if (perf_processor_interface == ONYX_INTF)
 		image_size = PCXU_IMAGE_SIZE;
-	else if (perf_processor_interface == CUDA_INTF)
+	else if (perf_processor_interface == CUDA_INTF)
 		image_size = PCXW_IMAGE_SIZE;
-	else
+	else
 		return -EFAULT;
 
 	if (!capable(CAP_SYS_ADMIN))
@@ -330,22 +330,22 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun
 
 	/* First check the machine type is correct for
 	   the requested image */
-	if (((perf_processor_interface == CUDA_INTF) &&
-	    (interface_type != CUDA_INTF)) ||
-	    ((perf_processor_interface == ONYX_INTF) &&
-	    (interface_type != ONYX_INTF)))
+	if (((perf_processor_interface == CUDA_INTF) &&
+	     (interface_type != CUDA_INTF)) ||
+	    ((perf_processor_interface == ONYX_INTF) &&
+	     (interface_type != ONYX_INTF)))
 		return -EINVAL;
 
 	/* Next check to make sure the requested image
 	   is valid */
-	if (((interface_type == CUDA_INTF) &&
+	if (((interface_type == CUDA_INTF) &&
 	    (test >= MAX_CUDA_IMAGES)) ||
-	    ((interface_type == ONYX_INTF) &&
-	    (test >= MAX_ONYX_IMAGES)))
+	    ((interface_type == ONYX_INTF) &&
+	     (test >= MAX_ONYX_IMAGES)))
 		return -EINVAL;
 
 	/* Copy the image into the processor */
-	if (interface_type == CUDA_INTF)
+	if (interface_type == CUDA_INTF)
 		return perf_config(cuda_images[test]);
 	else
 		return perf_config(onyx_images[test]);
@@ -359,7 +359,7 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun
 static void perf_patch_images(void)
 {
 #if 0 /* FIXME!! */
-/*
+/*
  * NOTE: this routine is VERY specific to the current TLB image.
  * If the image is changed, this routine might also need to be changed.
  */
@@ -367,9 +367,9 @@ static void perf_patch_images(void)
 	extern void $i_dtlb_miss_2_0();
 	extern void PA2_0_iva();
 
-	/*
+	/*
 	 * We can only use the lower 32-bits, the upper 32-bits should be 0
-	 * anyway given this is in the kernel
+	 * anyway given this is in the kernel
 	 */
 	uint32_t itlb_addr = (uint32_t)&($i_itlb_miss_2_0);
 	uint32_t dtlb_addr = (uint32_t)&($i_dtlb_miss_2_0);
@@ -377,21 +377,21 @@ static void perf_patch_images(void)
 
 	if (perf_processor_interface == ONYX_INTF) {
 		/* clear last 2 bytes */
-		onyx_images[TLBMISS][15] &= 0xffffff00;
+		onyx_images[TLBMISS][15] &= 0xffffff00;
 		/* set 2 bytes */
 		onyx_images[TLBMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
 		onyx_images[TLBMISS][16] = (dtlb_addr << 8)&0xffffff00;
 		onyx_images[TLBMISS][17] = itlb_addr;
 
 		/* clear last 2 bytes */
-		onyx_images[TLBHANDMISS][15] &= 0xffffff00;
+		onyx_images[TLBHANDMISS][15] &= 0xffffff00;
 		/* set 2 bytes */
 		onyx_images[TLBHANDMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
 		onyx_images[TLBHANDMISS][16] = (dtlb_addr << 8)&0xffffff00;
 		onyx_images[TLBHANDMISS][17] = itlb_addr;
 
 		/* clear last 2 bytes */
-		onyx_images[BIG_CPI][15] &= 0xffffff00;
+		onyx_images[BIG_CPI][15] &= 0xffffff00;
 		/* set 2 bytes */
 		onyx_images[BIG_CPI][15] |= (0x000000ff&((dtlb_addr) >> 24));
 		onyx_images[BIG_CPI][16] = (dtlb_addr << 8)&0xffffff00;
@@ -404,24 +404,24 @@ static void perf_patch_images(void)
 
 	} else if (perf_processor_interface == CUDA_INTF) {
 		/* Cuda interface */
-		cuda_images[TLBMISS][16] =
+		cuda_images[TLBMISS][16] =
 		    (cuda_images[TLBMISS][16]&0xffff0000) |
 		    ((dtlb_addr >> 8)&0x0000ffff);
-		cuda_images[TLBMISS][17] =
+		cuda_images[TLBMISS][17] =
 		    ((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
 		cuda_images[TLBMISS][18] = (itlb_addr << 16)&0xffff0000;
 
-		cuda_images[TLBHANDMISS][16] =
+		cuda_images[TLBHANDMISS][16] =
 		    (cuda_images[TLBHANDMISS][16]&0xffff0000) |
 		    ((dtlb_addr >> 8)&0x0000ffff);
-		cuda_images[TLBHANDMISS][17] =
+		cuda_images[TLBHANDMISS][17] =
 		    ((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
 		cuda_images[TLBHANDMISS][18] = (itlb_addr << 16)&0xffff0000;
 
-		cuda_images[BIG_CPI][16] =
+		cuda_images[BIG_CPI][16] =
 		    (cuda_images[BIG_CPI][16]&0xffff0000) |
 		    ((dtlb_addr >> 8)&0x0000ffff);
-		cuda_images[BIG_CPI][17] =
+		cuda_images[BIG_CPI][17] =
 		    ((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
 		cuda_images[BIG_CPI][18] = (itlb_addr << 16)&0xffff0000;
 	} else {
@@ -433,7 +433,7 @@ static void perf_patch_images(void)
 
 /*
  * ioctl routine
- * All routines effect the processor that they are executed on.  Thus you
+ * All routines effect the processor that they are executed on.  Thus you
  * must be running on the processor that you wish to change.
 */
 
@@ -459,7 +459,7 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 			}
 
 			/* copy out the Counters */
-			if (copy_to_user((void __user *)arg, raddr,
+			if (copy_to_user((void __user *)arg, raddr,
 					sizeof (raddr)) != 0) {
 				error =  -EFAULT;
 				break;
@@ -487,7 +487,7 @@ static const struct file_operations perf_fops = {
 	.open = perf_open,
 	.release = perf_release
 };
-
+
 static struct miscdevice perf_dev = {
 	MISC_DYNAMIC_MINOR,
 	PA_PERF_DEV,
@@ -595,7 +595,7 @@ static int perf_stop_counters(uint32_t *raddr)
 		/* OR sticky2 (bit 1496) to counter2 bit 32 */
 		tmp64 |= (userbuf[23] >> 8) & 0x0000000080000000;
 		raddr[2] = (uint32_t)tmp64;
-
+
 		/* Counter3 is bits 1497 to 1528 */
 		tmp64 = (userbuf[23] >> 7) & 0x00000000ffffffff;
 		/* OR sticky3 (bit 1529) to counter3 bit 32 */
@@ -617,7 +617,7 @@ static int perf_stop_counters(uint32_t *raddr)
 		userbuf[22] = 0;
 		userbuf[23] = 0;
 
-		/*
+		/*
 		 * Write back the zeroed bytes + the image given
 		 * the read was destructive.
 		 */
@@ -625,13 +625,13 @@ static int perf_stop_counters(uint32_t *raddr)
 	} else {
 
 		/*
-		 * Read RDR-15 which contains the counters and sticky bits
+		 * Read RDR-15 which contains the counters and sticky bits
 		 */
 		if (!perf_rdr_read_ubuf(15, userbuf)) {
 			return -13;
 		}
 
-		/*
+		/*
 		 * Clear out the counters
 		 */
 		perf_rdr_clear(15);
@@ -644,7 +644,7 @@ static int perf_stop_counters(uint32_t *raddr)
 		raddr[2] = (uint32_t)((userbuf[1] >> 32) & 0x00000000ffffffffUL);
 		raddr[3] = (uint32_t)(userbuf[1] & 0x00000000ffffffffUL);
 	}
-
+
 	return 0;
 }
 
@@ -682,7 +682,7 @@ static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer)
 	i = tentry->num_words;
 	while (i--) {
 		buffer[i] = 0;
-	}
+	}
 
 	/* Check for bits an even number of 64 */
 	if ((xbits = width & 0x03f) != 0) {
@@ -808,18 +808,22 @@ static int perf_write_image(uint64_t *memaddr)
 	}
 
 	runway = ioremap_nocache(cpu_device->hpa.start, 4096);
+	if (!runway) {
+		pr_err("perf_write_image: ioremap failed!\n");
+		return -ENOMEM;
+	}
 
 	/* Merge intrigue bits into Runway STATUS 0 */
 	tmp64 = __raw_readq(runway + RUNWAY_STATUS) & 0xffecfffffffffffful;
-	__raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul),
+	__raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul),
 		     runway + RUNWAY_STATUS);
 
 	/* Write RUNWAY DEBUG registers */
 	for (i = 0; i < 8; i++) {
 		__raw_writeq(*memaddr++, runway + RUNWAY_DEBUG);
 	}
 
-	return 0;
+	return 0;
 }
 
 /*
@@ -843,7 +847,7 @@ printk("perf_rdr_write\n");
 			perf_rdr_shift_out_U(rdr_num, buffer[i]);
 		} else {
 			perf_rdr_shift_out_W(rdr_num, buffer[i]);
-		}
+		}
 	}
 printk("perf_rdr_write done\n");
 }
@@ -142,6 +142,8 @@ void machine_power_off(void)
 
 	printk(KERN_EMERG "System shut down completed.\n"
 	       "Please power this system off now.");
+
+	for (;;);
 }
 
 void (*pm_power_off)(void) = machine_power_off;
@@ -444,6 +444,7 @@
 	ENTRY_SAME(copy_file_range)
 	ENTRY_COMP(preadv2)
 	ENTRY_COMP(pwritev2)
+	ENTRY_SAME(statx)
 
 
 .ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
@@ -387,3 +387,4 @@ SYSCALL(copy_file_range)
 COMPAT_SYS_SPU(preadv2)
 COMPAT_SYS_SPU(pwritev2)
 SYSCALL(kexec_file_load)
+SYSCALL(statx)
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls		383
+#define NR_syscalls		384
 
 #define __NR__exit __NR_exit
 
@@ -393,5 +393,6 @@
 #define __NR_preadv2		380
 #define __NR_pwritev2		381
 #define __NR_kexec_file_load	382
+#define __NR_statx		383
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
@@ -751,7 +751,9 @@ void __init hpte_init_pseries(void)
 	mmu_hash_ops.flush_hash_range	 = pSeries_lpar_flush_hash_range;
 	mmu_hash_ops.hpte_clear_all	 = pseries_hpte_clear_all;
 	mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
-	mmu_hash_ops.resize_hpt		 = pseries_lpar_resize_hpt;
+
+	if (firmware_has_feature(FW_FEATURE_HPT_RESIZE))
+		mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt;
 }
 
 void radix_init_pseries(void)
@@ -2101,8 +2101,8 @@ static int x86_pmu_event_init(struct perf_event *event)
 
 static void refresh_pce(void *ignored)
 {
-	if (current->mm)
-		load_mm_cr4(current->mm);
+	if (current->active_mm)
+		load_mm_cr4(current->active_mm);
 }
 
 static void x86_pmu_event_mapped(struct perf_event *event)
@@ -2110,6 +2110,18 @@ static void x86_pmu_event_mapped(struct perf_event *event)
 	if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
 		return;
 
+	/*
+	 * This function relies on not being called concurrently in two
+	 * tasks in the same mm.  Otherwise one task could observe
+	 * perf_rdpmc_allowed > 1 and return all the way back to
+	 * userspace with CR4.PCE clear while another task is still
+	 * doing on_each_cpu_mask() to propagate CR4.PCE.
+	 *
+	 * For now, this can't happen because all callers hold mmap_sem
+	 * for write.  If this changes, we'll need a different solution.
+	 */
+	lockdep_assert_held_exclusive(&current->mm->mmap_sem);
+
 	if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
 		on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
 }
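Aside, for illustration only: the new comment and lockdep_assert_held_exclusive() above encode an invariant (callers must hold mmap_sem for write). A rough user-space analogue using POSIX rwlocks, where a failing trywrlock stands in for the lockdep assertion; every name here is invented for the sketch:

/* Assert-the-lock-is-held pattern, user-space analogue. */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;
static int perf_rdpmc_allowed;

static void event_mapped_demo(void)
{
	/* caller must already hold the write lock, so trywrlock fails */
	assert(pthread_rwlock_trywrlock(&mmap_sem) != 0);

	if (++perf_rdpmc_allowed == 1)
		puts("first mapping: would propagate CR4.PCE to all CPUs");
}

int main(void)
{
	pthread_rwlock_wrlock(&mmap_sem);
	event_mapped_demo();
	pthread_rwlock_unlock(&mmap_sem);
	return 0;
}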
@@ -121,12 +121,9 @@ static inline void native_pmd_clear(pmd_t *pmd)
 	*(tmp + 1) = 0;
 }
 
-#if !defined(CONFIG_SMP) || (defined(CONFIG_HIGHMEM64G) && \
-		defined(CONFIG_PARAVIRT))
 static inline void native_pud_clear(pud_t *pudp)
 {
 }
-#endif
 
 static inline void pud_clear(pud_t *pudp)
 {
@@ -62,7 +62,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
 # define set_pud(pudp, pud)		native_set_pud(pudp, pud)
 #endif
 
-#ifndef __PAGETABLE_PMD_FOLDED
+#ifndef __PAGETABLE_PUD_FOLDED
 #define pud_clear(pud)			native_pud_clear(pud)
 #endif
 
@@ -179,10 +179,15 @@ static int acpi_register_lapic(int id, u32 acpiid, u8 enabled)
 		return -EINVAL;
 	}
 
+	if (!enabled) {
+		++disabled_cpus;
+		return -EINVAL;
+	}
+
 	if (boot_cpu_physical_apicid != -1U)
 		ver = boot_cpu_apic_version;
 
-	cpu = __generic_processor_info(id, ver, enabled);
+	cpu = generic_processor_info(id, ver);
 	if (cpu >= 0)
 		early_per_cpu(x86_cpu_to_acpiid, cpu) = acpiid;
 
@@ -710,7 +715,7 @@ static void __init acpi_set_irq_model_ioapic(void)
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 #include <acpi/processor.h>
 
-int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 {
 #ifdef CONFIG_ACPI_NUMA
 	int nid;
@@ -2063,7 +2063,7 @@ static int allocate_logical_cpuid(int apicid)
 	return nr_logical_cpuids++;
 }
 
-int __generic_processor_info(int apicid, int version, bool enabled)
+int generic_processor_info(int apicid, int version)
 {
 	int cpu, max = nr_cpu_ids;
 	bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
@@ -2121,11 +2121,9 @@ int __generic_processor_info(int apicid, int version, bool enabled)
 	if (num_processors >= nr_cpu_ids) {
 		int thiscpu = max + disabled_cpus;
 
-		if (enabled) {
-			pr_warning("APIC: NR_CPUS/possible_cpus limit of %i "
-				   "reached. Processor %d/0x%x ignored.\n",
-				   max, thiscpu, apicid);
-		}
+		pr_warning("APIC: NR_CPUS/possible_cpus limit of %i "
+			   "reached. Processor %d/0x%x ignored.\n",
+			   max, thiscpu, apicid);
 
 		disabled_cpus++;
 		return -EINVAL;
@@ -2177,23 +2175,13 @@ int __generic_processor_info(int apicid, int version, bool enabled)
 		apic->x86_32_early_logical_apicid(cpu);
 #endif
 	set_cpu_possible(cpu, true);
-
-	if (enabled) {
-		num_processors++;
-		physid_set(apicid, phys_cpu_present_map);
-		set_cpu_present(cpu, true);
-	} else {
-		disabled_cpus++;
-	}
+	physid_set(apicid, phys_cpu_present_map);
+	set_cpu_present(cpu, true);
+	num_processors++;
 
 	return cpu;
 }
 
-int generic_processor_info(int apicid, int version)
-{
-	return __generic_processor_info(apicid, version, true);
-}
-
 int hard_smp_processor_id(void)
 {
 	return read_apic_id();
@@ -727,7 +727,7 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn)
 	if (atomic_dec_and_test(&rdtgrp->waitcount) &&
 	    (rdtgrp->flags & RDT_DELETED)) {
 		kernfs_unbreak_active_protection(kn);
-		kernfs_put(kn);
+		kernfs_put(rdtgrp->kn);
 		kfree(rdtgrp);
 	} else {
 		kernfs_unbreak_active_protection(kn);
@@ -4,6 +4,7 @@
  *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
  */
 
+#define DISABLE_BRANCH_PROFILING
 #include <linux/init.h>
 #include <linux/linkage.h>
 #include <linux/types.h>
|
|||
spin_lock_irqsave(&desc->lock, flags);
|
||||
|
||||
/*
|
||||
* most handlers of type NMI_UNKNOWN never return because
|
||||
* they just assume the NMI is theirs. Just a sanity check
|
||||
* to manage expectations
|
||||
* Indicate if there are multiple registrations on the
|
||||
* internal NMI handler call chains (SERR and IO_CHECK).
|
||||
*/
|
||||
WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head));
|
||||
WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head));
|
||||
WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head));
|
||||
|
||||
|
|
|
@ -1333,6 +1333,8 @@ static int __init init_tsc_clocksource(void)
|
|||
* the refined calibration and directly register it as a clocksource.
|
||||
*/
|
||||
if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
|
||||
if (boot_cpu_has(X86_FEATURE_ART))
|
||||
art_related_clocksource = &clocksource_tsc;
|
||||
clocksource_register_khz(&clocksource_tsc, tsc_khz);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -82,19 +82,43 @@ static size_t regs_size(struct pt_regs *regs)
|
|||
return sizeof(*regs);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
#define GCC_REALIGN_WORDS 3
|
||||
#else
|
||||
#define GCC_REALIGN_WORDS 1
|
||||
#endif
|
||||
|
||||
static bool is_last_task_frame(struct unwind_state *state)
|
||||
{
|
||||
unsigned long bp = (unsigned long)state->bp;
|
||||
unsigned long regs = (unsigned long)task_pt_regs(state->task);
|
||||
unsigned long *last_bp = (unsigned long *)task_pt_regs(state->task) - 2;
|
||||
unsigned long *aligned_bp = last_bp - GCC_REALIGN_WORDS;
|
||||
|
||||
/*
|
||||
* We have to check for the last task frame at two different locations
|
||||
* because gcc can occasionally decide to realign the stack pointer and
|
||||
* change the offset of the stack frame by a word in the prologue of a
|
||||
* function called by head/entry code.
|
||||
* change the offset of the stack frame in the prologue of a function
|
||||
* called by head/entry code. Examples:
|
||||
*
|
||||
* <start_secondary>:
|
||||
* push %edi
|
||||
* lea 0x8(%esp),%edi
|
||||
* and $0xfffffff8,%esp
|
||||
* pushl -0x4(%edi)
|
||||
* push %ebp
|
||||
* mov %esp,%ebp
|
||||
*
|
||||
* <x86_64_start_kernel>:
|
||||
* lea 0x8(%rsp),%r10
|
||||
* and $0xfffffffffffffff0,%rsp
|
||||
* pushq -0x8(%r10)
|
||||
* push %rbp
|
||||
* mov %rsp,%rbp
|
||||
*
|
||||
* Note that after aligning the stack, it pushes a duplicate copy of
|
||||
* the return address before pushing the frame pointer.
|
||||
*/
|
||||
return bp == regs - FRAME_HEADER_SIZE ||
|
||||
bp == regs - FRAME_HEADER_SIZE - sizeof(long);
|
||||
return (state->bp == last_bp ||
|
||||
(state->bp == aligned_bp && *(aligned_bp+1) == *(last_bp+1)));
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
#define DISABLE_BRANCH_PROFILING
|
||||
#define pr_fmt(fmt) "kasan: " fmt
|
||||
#include <linux/bootmem.h>
|
||||
#include <linux/kasan.h>
|
||||
|
|
|
@ -590,7 +590,7 @@ static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm,
|
|||
* we might run off the end of the bounds table if we are on
|
||||
* a 64-bit kernel and try to get 8 bytes.
|
||||
*/
|
||||
int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
|
||||
static int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
|
||||
long __user *bd_entry_ptr)
|
||||
{
|
||||
u32 bd_entry_32;
|
||||
|
|
|
@ -26,5 +26,6 @@ obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_pcal9555a.o
|
|||
obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o
|
||||
# MISC Devices
|
||||
obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o
|
||||
obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_mrfld_power_btn.o
|
||||
obj-$(subst m,y,$(CONFIG_RTC_DRV_CMOS)) += platform_mrfld_rtc.o
|
||||
obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o
|
||||
|
|
|
@ -0,0 +1,82 @@
|
|||
/*
|
||||
* Intel Merrifield power button support
|
||||
*
|
||||
* (C) Copyright 2017 Intel Corporation
|
||||
*
|
||||
* Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; version 2
|
||||
* of the License.
|
||||
*/
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/sfi.h>
|
||||
|
||||
#include <asm/intel-mid.h>
|
||||
#include <asm/intel_scu_ipc.h>
|
||||
|
||||
static struct resource mrfld_power_btn_resources[] = {
|
||||
{
|
||||
.flags = IORESOURCE_IRQ,
|
||||
},
|
||||
};
|
||||
|
||||
static struct platform_device mrfld_power_btn_dev = {
|
||||
.name = "msic_power_btn",
|
||||
.id = PLATFORM_DEVID_NONE,
|
||||
.num_resources = ARRAY_SIZE(mrfld_power_btn_resources),
|
||||
.resource = mrfld_power_btn_resources,
|
||||
};
|
||||
|
||||
static int mrfld_power_btn_scu_status_change(struct notifier_block *nb,
|
||||
unsigned long code, void *data)
|
||||
{
|
||||
if (code == SCU_DOWN) {
|
||||
platform_device_unregister(&mrfld_power_btn_dev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return platform_device_register(&mrfld_power_btn_dev);
|
||||
}
|
||||
|
||||
static struct notifier_block mrfld_power_btn_scu_notifier = {
|
||||
.notifier_call = mrfld_power_btn_scu_status_change,
|
||||
};
|
||||
|
||||
static int __init register_mrfld_power_btn(void)
|
||||
{
|
||||
if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
|
||||
return -ENODEV;
|
||||
|
||||
/*
|
||||
* We need to be sure that the SCU IPC is ready before
|
||||
* PMIC power button device can be registered:
|
||||
*/
|
||||
intel_scu_notifier_add(&mrfld_power_btn_scu_notifier);
|
||||
|
||||
return 0;
|
||||
}
|
||||
arch_initcall(register_mrfld_power_btn);
|
||||
|
||||
static void __init *mrfld_power_btn_platform_data(void *info)
|
||||
{
|
||||
struct resource *res = mrfld_power_btn_resources;
|
||||
struct sfi_device_table_entry *pentry = info;
|
||||
|
||||
res->start = res->end = pentry->irq;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static const struct devs_id mrfld_power_btn_dev_id __initconst = {
|
||||
.name = "bcove_power_btn",
|
||||
.type = SFI_DEV_TYPE_IPC,
|
||||
.delay = 1,
|
||||
.msic = 1,
|
||||
.get_platform_data = &mrfld_power_btn_platform_data,
|
||||
};
|
||||
|
||||
sfi_device(mrfld_power_btn_dev_id);
|
|
@@ -19,7 +19,7 @@
 #include <asm/intel_scu_ipc.h>
 #include <asm/io_apic.h>
 
-#define TANGIER_EXT_TIMER0_MSI 15
+#define TANGIER_EXT_TIMER0_MSI 12
 
 static struct platform_device wdt_dev = {
 	.name = "intel_mid_wdt",
@@ -17,16 +17,6 @@
 
 #include "intel_mid_weak_decls.h"
 
-static void penwell_arch_setup(void);
-
-/* penwell arch ops */
-static struct intel_mid_ops penwell_ops = {
-	.arch_setup = penwell_arch_setup,
-};
-
-static void mfld_power_off(void)
-{
-}
-
 static unsigned long __init mfld_calibrate_tsc(void)
 {
 	unsigned long fast_calibrate;
@@ -63,9 +53,12 @@ static unsigned long __init mfld_calibrate_tsc(void)
 static void __init penwell_arch_setup(void)
 {
 	x86_platform.calibrate_tsc = mfld_calibrate_tsc;
-	pm_power_off = mfld_power_off;
 }
 
+static struct intel_mid_ops penwell_ops = {
+	.arch_setup = penwell_arch_setup,
+};
+
 void *get_penwell_ops(void)
 {
 	return &penwell_ops;
block/bio.c | 12

@@ -376,10 +376,14 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
 	bio_list_init(&punt);
 	bio_list_init(&nopunt);
 
-	while ((bio = bio_list_pop(current->bio_list)))
+	while ((bio = bio_list_pop(&current->bio_list[0])))
 		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
+	current->bio_list[0] = nopunt;
 
-	*current->bio_list = nopunt;
+	bio_list_init(&nopunt);
+	while ((bio = bio_list_pop(&current->bio_list[1])))
+		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
+	current->bio_list[1] = nopunt;
 
 	spin_lock(&bs->rescue_lock);
 	bio_list_merge(&bs->rescue_list, &punt);
@@ -466,7 +470,9 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 		 * we retry with the original gfp_flags.
 		 */
 
-		if (current->bio_list && !bio_list_empty(current->bio_list))
+		if (current->bio_list &&
+		    (!bio_list_empty(&current->bio_list[0]) ||
+		     !bio_list_empty(&current->bio_list[1])))
 			gfp_mask &= ~__GFP_DIRECT_RECLAIM;
 
 		p = mempool_alloc(bs->bio_pool, gfp_mask);
@@ -1973,7 +1973,14 @@ end_io:
  */
 blk_qc_t generic_make_request(struct bio *bio)
 {
-	struct bio_list bio_list_on_stack;
+	/*
+	 * bio_list_on_stack[0] contains bios submitted by the current
+	 * make_request_fn.
+	 * bio_list_on_stack[1] contains bios that were submitted before
+	 * the current make_request_fn, but that haven't been processed
+	 * yet.
+	 */
+	struct bio_list bio_list_on_stack[2];
 	blk_qc_t ret = BLK_QC_T_NONE;
 
 	if (!generic_make_request_checks(bio))
@@ -1990,7 +1997,7 @@ blk_qc_t generic_make_request(struct bio *bio)
 	 * should be added at the tail
 	 */
 	if (current->bio_list) {
-		bio_list_add(current->bio_list, bio);
+		bio_list_add(&current->bio_list[0], bio);
 		goto out;
 	}
 
@@ -2009,18 +2016,17 @@ blk_qc_t generic_make_request(struct bio *bio)
 	 * bio_list, and call into ->make_request() again.
 	 */
 	BUG_ON(bio->bi_next);
-	bio_list_init(&bio_list_on_stack);
-	current->bio_list = &bio_list_on_stack;
+	bio_list_init(&bio_list_on_stack[0]);
+	current->bio_list = bio_list_on_stack;
 	do {
 		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 
 		if (likely(blk_queue_enter(q, false) == 0)) {
-			struct bio_list hold;
 			struct bio_list lower, same;
 
 			/* Create a fresh bio_list for all subordinate requests */
-			hold = bio_list_on_stack;
-			bio_list_init(&bio_list_on_stack);
+			bio_list_on_stack[1] = bio_list_on_stack[0];
+			bio_list_init(&bio_list_on_stack[0]);
 			ret = q->make_request_fn(q, bio);
 
 			blk_queue_exit(q);
@@ -2030,19 +2036,19 @@ blk_qc_t generic_make_request(struct bio *bio)
 			 */
 			bio_list_init(&lower);
 			bio_list_init(&same);
-			while ((bio = bio_list_pop(&bio_list_on_stack)) != NULL)
+			while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
 				if (q == bdev_get_queue(bio->bi_bdev))
 					bio_list_add(&same, bio);
 				else
 					bio_list_add(&lower, bio);
 			/* now assemble so we handle the lowest level first */
-			bio_list_merge(&bio_list_on_stack, &lower);
-			bio_list_merge(&bio_list_on_stack, &same);
-			bio_list_merge(&bio_list_on_stack, &hold);
+			bio_list_merge(&bio_list_on_stack[0], &lower);
+			bio_list_merge(&bio_list_on_stack[0], &same);
+			bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
 		} else {
 			bio_io_error(bio);
 		}
-		bio = bio_list_pop(current->bio_list);
+		bio = bio_list_pop(&bio_list_on_stack[0]);
 	} while (bio);
 	current->bio_list = NULL; /* deactivate */
 
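Aside, for illustration only: the bio_list_on_stack[] comment above describes turning recursive request submission into iteration so that deeply stacked block drivers cannot overflow the kernel stack. A compact user-space sketch of the same control flow, with integers standing in for bios and all names invented for the example:

/* Recursion-to-iteration submission pattern. */
#include <stdio.h>

#define MAX 32

struct bio_list_demo { int items[MAX]; int n; };

static struct bio_list_demo *current_list;	/* NULL unless draining */

static void driver_make_request(int bio);

static void generic_make_request_demo(int bio)
{
	struct bio_list_demo on_stack = { .n = 0 };

	if (current_list) {
		/* re-entered from a driver: queue instead of recursing */
		current_list->items[current_list->n++] = bio;
		return;
	}

	current_list = &on_stack;
	driver_make_request(bio);
	while (on_stack.n)		/* drain spawned bios iteratively */
		driver_make_request(on_stack.items[--on_stack.n]);
	current_list = NULL;		/* deactivate */
}

static void driver_make_request(int bio)
{
	printf("driver handles bio %d\n", bio);
	if (bio < 4) {			/* a stacking driver splits it */
		generic_make_request_demo(bio * 2);
		generic_make_request_demo(bio * 2 + 1);
	}
}

int main(void)
{
	generic_make_request_demo(1);	/* bounded stack depth throughout */
	return 0;
}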
@@ -295,6 +295,9 @@ int blk_mq_reinit_tagset(struct blk_mq_tag_set *set)
 	for (i = 0; i < set->nr_hw_queues; i++) {
 		struct blk_mq_tags *tags = set->tags[i];
 
+		if (!tags)
+			continue;
+
 		for (j = 0; j < tags->nr_tags; j++) {
 			if (!tags->static_rqs[j])
 				continue;
@@ -1434,7 +1434,8 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
 	return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
 }
 
-static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
+static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
+				      bool may_sleep)
 {
 	struct request_queue *q = rq->q;
 	struct blk_mq_queue_data bd = {
@@ -1475,7 +1476,7 @@ static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
 	}
 
 insert:
-	blk_mq_sched_insert_request(rq, false, true, true, false);
+	blk_mq_sched_insert_request(rq, false, true, false, may_sleep);
 }
 
 /*
@@ -1569,11 +1570,11 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 		if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
 			rcu_read_lock();
-			blk_mq_try_issue_directly(old_rq, &cookie);
+			blk_mq_try_issue_directly(old_rq, &cookie, false);
 			rcu_read_unlock();
 		} else {
 			srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
-			blk_mq_try_issue_directly(old_rq, &cookie);
+			blk_mq_try_issue_directly(old_rq, &cookie, true);
 			srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
 		}
 		goto done;
@@ -182,11 +182,6 @@ int __weak arch_register_cpu(int cpu)
 
 void __weak arch_unregister_cpu(int cpu) {}
 
-int __weak acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
-{
-	return -ENODEV;
-}
-
 static int acpi_processor_hotadd_init(struct acpi_processor *pr)
 {
 	unsigned long long sta;
@@ -285,6 +280,13 @@ static int acpi_processor_get_info(struct acpi_device *device)
 		pr->acpi_id = value;
 	}
 
+	if (acpi_duplicate_processor_id(pr->acpi_id)) {
+		dev_err(&device->dev,
+			"Failed to get unique processor _UID (0x%x)\n",
+			pr->acpi_id);
+		return -ENODEV;
+	}
+
 	pr->phys_id = acpi_get_phys_id(pr->handle, device_declaration,
 					pr->acpi_id);
 	if (invalid_phys_cpuid(pr->phys_id))
@@ -585,7 +587,7 @@ static struct acpi_scan_handler processor_container_handler = {
 static int nr_unique_ids __initdata;
 
 /* The number of the duplicate processor IDs */
-static int nr_duplicate_ids __initdata;
+static int nr_duplicate_ids;
 
 /* Used to store the unique processor IDs */
 static int unique_processor_ids[] __initdata = {
@@ -593,7 +595,7 @@ static int unique_processor_ids[] __initdata = {
 };
 
 /* Used to store the duplicate processor IDs */
-static int duplicate_processor_ids[] __initdata = {
+static int duplicate_processor_ids[] = {
 	[0 ... NR_CPUS - 1] = -1,
 };
 
@@ -638,28 +640,53 @@ static acpi_status __init acpi_processor_ids_walk(acpi_handle handle,
 					   void **rv)
 {
 	acpi_status status;
+	acpi_object_type acpi_type;
+	unsigned long long uid;
 	union acpi_object object = { 0 };
 	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
 
-	status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
+	status = acpi_get_type(handle, &acpi_type);
 	if (ACPI_FAILURE(status))
-		acpi_handle_info(handle, "Not get the processor object\n");
-	else
-		processor_validated_ids_update(object.processor.proc_id);
+		return false;
+
+	switch (acpi_type) {
+	case ACPI_TYPE_PROCESSOR:
+		status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
+		if (ACPI_FAILURE(status))
+			goto err;
+		uid = object.processor.proc_id;
+		break;
+
+	case ACPI_TYPE_DEVICE:
+		status = acpi_evaluate_integer(handle, "_UID", NULL, &uid);
+		if (ACPI_FAILURE(status))
+			goto err;
+		break;
+	default:
+		goto err;
+	}
+
+	processor_validated_ids_update(uid);
+	return true;
+
+err:
+	acpi_handle_info(handle, "Invalid processor object\n");
+	return false;
 
-	return AE_OK;
 }
 
-static void __init acpi_processor_check_duplicates(void)
+void __init acpi_processor_check_duplicates(void)
 {
-	/* Search all processor nodes in ACPI namespace */
+	/* check the correctness for all processors in ACPI namespace */
 	acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
 						ACPI_UINT32_MAX,
 						acpi_processor_ids_walk,
 						NULL, NULL, NULL);
+	acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, acpi_processor_ids_walk,
+						NULL, NULL);
 }
 
-bool __init acpi_processor_validate_proc_id(int proc_id)
+bool acpi_duplicate_processor_id(int proc_id)
 {
 	int i;
 
@@ -1249,7 +1249,6 @@ static int __init acpi_init(void)
 	acpi_wakeup_device_init();
 	acpi_debugger_init();
 	acpi_setup_sb_notify_handler();
-	acpi_set_processor_mapping();
 	return 0;
 }
 
@@ -32,12 +32,12 @@ static struct acpi_table_madt *get_madt_table(void)
 }
 
 static int map_lapic_id(struct acpi_subtable_header *entry,
-		 u32 acpi_id, phys_cpuid_t *apic_id, bool ignore_disabled)
+		 u32 acpi_id, phys_cpuid_t *apic_id)
 {
 	struct acpi_madt_local_apic *lapic =
 		container_of(entry, struct acpi_madt_local_apic, header);
 
-	if (ignore_disabled && !(lapic->lapic_flags & ACPI_MADT_ENABLED))
+	if (!(lapic->lapic_flags & ACPI_MADT_ENABLED))
 		return -ENODEV;
 
 	if (lapic->processor_id != acpi_id)
@@ -48,13 +48,12 @@ static int map_lapic_id(struct acpi_subtable_header *entry,
 }
 
 static int map_x2apic_id(struct acpi_subtable_header *entry,
-		int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id,
-		bool ignore_disabled)
+		int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id)
 {
 	struct acpi_madt_local_x2apic *apic =
 		container_of(entry, struct acpi_madt_local_x2apic, header);
 
-	if (ignore_disabled && !(apic->lapic_flags & ACPI_MADT_ENABLED))
+	if (!(apic->lapic_flags & ACPI_MADT_ENABLED))
 		return -ENODEV;
 
 	if (device_declaration && (apic->uid == acpi_id)) {
@@ -66,13 +65,12 @@ static int map_x2apic_id(struct acpi_subtable_header *entry,
 }
 
 static int map_lsapic_id(struct acpi_subtable_header *entry,
-		int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id,
-		bool ignore_disabled)
+		int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id)
 {
 	struct acpi_madt_local_sapic *lsapic =
 		container_of(entry, struct acpi_madt_local_sapic, header);
 
-	if (ignore_disabled && !(lsapic->lapic_flags & ACPI_MADT_ENABLED))
+	if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
 		return -ENODEV;
 
 	if (device_declaration) {
@@ -89,13 +87,12 @@ static int map_lsapic_id(struct acpi_subtable_header *entry,
  * Retrieve the ARM CPU physical identifier (MPIDR)
  */
 static int map_gicc_mpidr(struct acpi_subtable_header *entry,
-		int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr,
-		bool ignore_disabled)
+		int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr)
 {
 	struct acpi_madt_generic_interrupt *gicc =
 		container_of(entry, struct acpi_madt_generic_interrupt, header);
 
-	if (ignore_disabled && !(gicc->flags & ACPI_MADT_ENABLED))
+	if (!(gicc->flags & ACPI_MADT_ENABLED))
 		return -ENODEV;
 
 	/* device_declaration means Device object in DSDT, in the
@@ -112,7 +109,7 @@ static int map_gicc_mpidr(struct acpi_subtable_header *entry,
 }
 
 static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt,
-				   int type, u32 acpi_id, bool ignore_disabled)
+				   int type, u32 acpi_id)
 {
 	unsigned long madt_end, entry;
 	phys_cpuid_t phys_id = PHYS_CPUID_INVALID;	/* CPU hardware ID */
@@ -130,20 +127,16 @@ static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt,
 		struct acpi_subtable_header *header =
 			(struct acpi_subtable_header *)entry;
 		if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
-			if (!map_lapic_id(header, acpi_id, &phys_id,
-					  ignore_disabled))
+			if (!map_lapic_id(header, acpi_id, &phys_id))
 				break;
 		} else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
-			if (!map_x2apic_id(header, type, acpi_id, &phys_id,
-					   ignore_disabled))
+			if (!map_x2apic_id(header, type, acpi_id, &phys_id))
 				break;
 		} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
-			if (!map_lsapic_id(header, type, acpi_id, &phys_id,
-					   ignore_disabled))
+			if (!map_lsapic_id(header, type, acpi_id, &phys_id))
 				break;
 		} else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) {
-			if (!map_gicc_mpidr(header, type, acpi_id, &phys_id,
-					    ignore_disabled))
+			if (!map_gicc_mpidr(header, type, acpi_id, &phys_id))
 				break;
 		}
 		entry += header->length;
@@ -161,15 +154,14 @@ phys_cpuid_t __init acpi_map_madt_entry(u32 acpi_id)
 	if (!madt)
 		return PHYS_CPUID_INVALID;
 
-	rv = map_madt_entry(madt, 1, acpi_id, true);
+	rv = map_madt_entry(madt, 1, acpi_id);
 
 	acpi_put_table((struct acpi_table_header *)madt);
 
 	return rv;
 }
 
-static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id,
-				  bool ignore_disabled)
+static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
 {
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *obj;
@@ -190,38 +182,30 @@ static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id,
 
 	header = (struct acpi_subtable_header *)obj->buffer.pointer;
 	if (header->type == ACPI_MADT_TYPE_LOCAL_APIC)
-		map_lapic_id(header, acpi_id, &phys_id, ignore_disabled);
+		map_lapic_id(header, acpi_id, &phys_id);
 	else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC)
-		map_lsapic_id(header, type, acpi_id, &phys_id, ignore_disabled);
+		map_lsapic_id(header, type, acpi_id, &phys_id);
 	else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC)
-		map_x2apic_id(header, type, acpi_id, &phys_id, ignore_disabled);
+		map_x2apic_id(header, type, acpi_id, &phys_id);
 	else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT)
-		map_gicc_mpidr(header, type, acpi_id, &phys_id,
-			       ignore_disabled);
+		map_gicc_mpidr(header, type, acpi_id, &phys_id);
 
 exit:
 	kfree(buffer.pointer);
 	return phys_id;
 }
 
-static phys_cpuid_t __acpi_get_phys_id(acpi_handle handle, int type,
-				       u32 acpi_id, bool ignore_disabled)
+phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
 {
 	phys_cpuid_t phys_id;
 
-	phys_id = map_mat_entry(handle, type, acpi_id, ignore_disabled);
+	phys_id = map_mat_entry(handle, type, acpi_id);
 	if (invalid_phys_cpuid(phys_id))
-		phys_id = map_madt_entry(get_madt_table(), type, acpi_id,
-					   ignore_disabled);
+		phys_id = map_madt_entry(get_madt_table(), type, acpi_id);
 
 	return phys_id;
 }
 
-phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
-{
-	return __acpi_get_phys_id(handle, type, acpi_id, true);
-}
-
 int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id)
 {
 #ifdef CONFIG_SMP
@@ -278,79 +262,6 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
 }
 EXPORT_SYMBOL_GPL(acpi_get_cpuid);
 
-#ifdef CONFIG_ACPI_HOTPLUG_CPU
-static bool __init
-map_processor(acpi_handle handle, phys_cpuid_t *phys_id, int *cpuid)
-{
-	int type, id;
-	u32 acpi_id;
-	acpi_status status;
-	acpi_object_type acpi_type;
-	unsigned long long tmp;
-	union acpi_object object = { 0 };
-	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
-
-	status = acpi_get_type(handle, &acpi_type);
-	if (ACPI_FAILURE(status))
-		return false;
-
-	switch (acpi_type) {
-	case ACPI_TYPE_PROCESSOR:
-		status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
-		if (ACPI_FAILURE(status))
-			return false;
-		acpi_id = object.processor.proc_id;
-
-		/* validate the acpi_id */
-		if(acpi_processor_validate_proc_id(acpi_id))
-			return false;
-		break;
-	case ACPI_TYPE_DEVICE:
-		status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
-		if (ACPI_FAILURE(status))
-			return false;
-		acpi_id = tmp;
-		break;
-	default:
-		return false;
-	}
-
-	type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
-
-	*phys_id = __acpi_get_phys_id(handle, type, acpi_id, false);
-	id = acpi_map_cpuid(*phys_id, acpi_id);
-
-	if (id < 0)
-		return false;
-	*cpuid = id;
-	return true;
-}
-
-static acpi_status __init
-set_processor_node_mapping(acpi_handle handle, u32 lvl, void *context,
-			   void **rv)
-{
-	phys_cpuid_t phys_id;
-	int cpu_id;
-
-	if (!map_processor(handle, &phys_id, &cpu_id))
-		return AE_ERROR;
-
-	acpi_map_cpu2node(handle, cpu_id, phys_id);
-	return AE_OK;
-}
-
-void __init acpi_set_processor_mapping(void)
-{
-	/* Set persistent cpu <-> node mapping for all processors. */
-	acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
-			    ACPI_UINT32_MAX, set_processor_node_mapping,
-			    NULL, NULL, NULL);
-}
-#else
-void __init acpi_set_processor_mapping(void) {}
-#endif /* CONFIG_ACPI_HOTPLUG_CPU */
-
 #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
 static int get_ioapic_id(struct acpi_subtable_header *entry, u32 gsi_base,
 			 u64 *phys_addr, int *ioapic_id)
@@ -639,11 +639,6 @@ int lock_device_hotplug_sysfs(void)
 	return restart_syscall();
 }
 
-void assert_held_device_hotplug(void)
-{
-	lockdep_assert_held(&device_hotplug_lock);
-}
-
 #ifdef CONFIG_BLOCK
 static inline int device_is_not_partition(struct device *dev)
 {
@@ -344,7 +344,8 @@ config BT_WILINK
 
 config BT_QCOMSMD
 	tristate "Qualcomm SMD based HCI support"
-	depends on (QCOM_SMD && QCOM_WCNSS_CTRL) || COMPILE_TEST
+	depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n)
+	depends on QCOM_WCNSS_CTRL || (COMPILE_TEST && QCOM_WCNSS_CTRL=n)
 	select BT_QCA
 	help
 	  Qualcomm SMD based HCI driver.
@@ -10,7 +10,6 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/atmel_tc.h>
-#include <linux/sched_clock.h>
 
 
 /*
@@ -57,14 +56,9 @@ static u64 tc_get_cycles(struct clocksource *cs)
 	return (upper << 16) | lower;
 }
 
-static u32 tc_get_cv32(void)
-{
-	return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
-}
-
 static u64 tc_get_cycles32(struct clocksource *cs)
 {
-	return tc_get_cv32();
+	return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
 }
 
 static struct clocksource clksrc = {
@@ -75,11 +69,6 @@ static struct clocksource clksrc = {
 	.flags          = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-static u64 notrace tc_read_sched_clock(void)
-{
-	return tc_get_cv32();
-}
-
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 
 struct tc_clkevt_device {
@@ -350,9 +339,6 @@ static int __init tcb_clksrc_init(void)
 		clksrc.read = tc_get_cycles32;
 		/* setup ony channel 0 */
 		tcb_setup_single_chan(tc, best_divisor_idx);
-
-		/* register sched_clock on chips with single 32 bit counter */
-		sched_clock_register(tc_read_sched_clock, 32, divided_rate);
 	} else {
 		/* tclib will give us three clocks no matter what the
 		 * underlying platform supports.
@@ -680,9 +680,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
 					char *buf)
 {
 	unsigned int cur_freq = __cpufreq_get(policy);
-	if (!cur_freq)
-		return sprintf(buf, "<unknown>");
-	return sprintf(buf, "%u\n", cur_freq);
+
+	if (cur_freq)
+		return sprintf(buf, "%u\n", cur_freq);
+
+	return sprintf(buf, "<unknown>\n");
 }
 
 /**
@@ -84,6 +84,11 @@ static inline u64 div_ext_fp(u64 x, u64 y)
	return div64_u64(x << EXT_FRAC_BITS, y);
}

static inline int32_t percent_ext_fp(int percent)
{
	return div_ext_fp(percent, 100);
}

/**
 * struct sample -	Store performance sample
 * @core_avg_perf:	Ratio of APERF/MPERF which is the actual average

@@ -845,12 +850,11 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {

static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
{
	int min, hw_min, max, hw_max, cpu, range, adj_range;
	int min, hw_min, max, hw_max, cpu;
	struct perf_limits *perf_limits = limits;
	u64 value, cap;

	for_each_cpu(cpu, policy->cpus) {
		int max_perf_pct, min_perf_pct;
		struct cpudata *cpu_data = all_cpu_data[cpu];
		s16 epp;

@@ -863,20 +867,15 @@ static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
			hw_max = HWP_GUARANTEED_PERF(cap);
		else
			hw_max = HWP_HIGHEST_PERF(cap);
		range = hw_max - hw_min;

		max_perf_pct = perf_limits->max_perf_pct;
		min_perf_pct = perf_limits->min_perf_pct;
		min = fp_ext_toint(hw_max * perf_limits->min_perf);

		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
		adj_range = min_perf_pct * range / 100;
		min = hw_min + adj_range;

		value &= ~HWP_MIN_PERF(~0L);
		value |= HWP_MIN_PERF(min);

		adj_range = max_perf_pct * range / 100;
		max = hw_min + adj_range;

		max = fp_ext_toint(hw_max * perf_limits->max_perf);
		value &= ~HWP_MAX_PERF(~0L);
		value |= HWP_MAX_PERF(max);

@@ -989,6 +988,7 @@ static void intel_pstate_update_policies(void)
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
	intel_pstate_reset_all_pid();
	return 0;
}

@@ -1225,7 +1225,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				   limits->max_perf_pct);
	limits->max_perf_pct = max(limits->min_perf_pct,
				   limits->max_perf_pct);
	limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
	limits->max_perf = percent_ext_fp(limits->max_perf_pct);

	intel_pstate_update_policies();

@@ -1262,7 +1262,7 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				   limits->min_perf_pct);
	limits->min_perf_pct = min(limits->max_perf_pct,
				   limits->min_perf_pct);
	limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
	limits->min_perf = percent_ext_fp(limits->min_perf_pct);

	intel_pstate_update_policies();

@@ -2080,36 +2080,34 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu)
static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
					    struct perf_limits *limits)
{
	int32_t max_policy_perf, min_policy_perf;

	limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
					      policy->cpuinfo.max_freq);
	limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);
	max_policy_perf = div_ext_fp(policy->max, policy->cpuinfo.max_freq);
	max_policy_perf = clamp_t(int32_t, max_policy_perf, 0, int_ext_tofp(1));
	if (policy->max == policy->min) {
		limits->min_policy_pct = limits->max_policy_pct;
		min_policy_perf = max_policy_perf;
	} else {
		limits->min_policy_pct = DIV_ROUND_UP(policy->min * 100,
						      policy->cpuinfo.max_freq);
		limits->min_policy_pct = clamp_t(int, limits->min_policy_pct,
						 0, 100);
		min_policy_perf = div_ext_fp(policy->min,
					     policy->cpuinfo.max_freq);
		min_policy_perf = clamp_t(int32_t, min_policy_perf,
					  0, max_policy_perf);
	}

	/* Normalize user input to [min_policy_pct, max_policy_pct] */
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);
	/* Normalize user input to [min_perf, max_perf] */
	limits->min_perf = max(min_policy_perf,
			       percent_ext_fp(limits->min_sysfs_pct));
	limits->min_perf = min(limits->min_perf, max_policy_perf);
	limits->max_perf = min(max_policy_perf,
			       percent_ext_fp(limits->max_sysfs_pct));
	limits->max_perf = max(min_policy_perf, limits->max_perf);

	/* Make sure min_perf_pct <= max_perf_pct */
	limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
	/* Make sure min_perf <= max_perf */
	limits->min_perf = min(limits->min_perf, limits->max_perf);

	limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
	limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
	limits->max_perf = round_up(limits->max_perf, EXT_FRAC_BITS);
	limits->min_perf = round_up(limits->min_perf, EXT_FRAC_BITS);
	limits->max_perf_pct = fp_ext_toint(limits->max_perf * 100);
	limits->min_perf_pct = fp_ext_toint(limits->min_perf * 100);

	pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu,
		 limits->max_perf_pct, limits->min_perf_pct);

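The intel_pstate hunks above move the limits bookkeeping from integer percentages to extended fixed-point values (percent_ext_fp()/fp_ext_toint()). A minimal userspace sketch of that arithmetic follows; it assumes a fraction width of 8 bits for readability (the driver's real EXT_FRAC_BITS is wider) and uses plain 64-bit division in place of div64_u64():

#include <stdint.h>
#include <stdio.h>

#define EXT_FRAC_BITS 8			/* assumed width, see note above */

static int32_t div_ext_fp(int64_t x, int64_t y)
{
	return (int32_t)((x << EXT_FRAC_BITS) / y);	/* fixed-point x/y */
}

static int32_t percent_ext_fp(int percent)
{
	return div_ext_fp(percent, 100);	/* percent -> fixed-point fraction */
}

static int fp_ext_toint(int64_t x)
{
	return (int)(x >> EXT_FRAC_BITS);	/* drop the fraction bits */
}

int main(void)
{
	/* min = fp_ext_toint(hw_max * min_perf): 75% of 32 P-states -> 24 */
	int32_t min_perf = percent_ext_fp(75);
	printf("%d\n", fp_ext_toint(32 * (int64_t)min_perf));
	return 0;
}
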
@@ -427,6 +427,7 @@ static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
	int rc = VM_FAULT_SIGBUS;
	phys_addr_t phys;
	pfn_t pfn;
	unsigned int fault_size = PAGE_SIZE;

	if (check_vma(dax_dev, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

@@ -437,9 +438,12 @@ static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
		return VM_FAULT_SIGBUS;
	}

	if (fault_size != dax_region->align)
		return VM_FAULT_SIGBUS;

	phys = pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
		dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
				vmf->pgoff);
		return VM_FAULT_SIGBUS;
	}

@@ -464,6 +468,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
	phys_addr_t phys;
	pgoff_t pgoff;
	pfn_t pfn;
	unsigned int fault_size = PMD_SIZE;

	if (check_vma(dax_dev, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

@@ -480,10 +485,20 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
		return VM_FAULT_SIGBUS;
	}

	if (fault_size < dax_region->align)
		return VM_FAULT_SIGBUS;
	else if (fault_size > dax_region->align)
		return VM_FAULT_FALLBACK;

	/* if we are outside of the VMA */
	if (pmd_addr < vmf->vma->vm_start ||
	    (pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
		return VM_FAULT_SIGBUS;

	pgoff = linear_page_index(vmf->vma, pmd_addr);
	phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
		dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
				pgoff);
		return VM_FAULT_SIGBUS;
	}

@@ -503,6 +518,8 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
	phys_addr_t phys;
	pgoff_t pgoff;
	pfn_t pfn;
	unsigned int fault_size = PUD_SIZE;


	if (check_vma(dax_dev, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

@@ -519,10 +536,20 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
		return VM_FAULT_SIGBUS;
	}

	if (fault_size < dax_region->align)
		return VM_FAULT_SIGBUS;
	else if (fault_size > dax_region->align)
		return VM_FAULT_FALLBACK;

	/* if we are outside of the VMA */
	if (pud_addr < vmf->vma->vm_start ||
	    (pud_addr + PUD_SIZE) > vmf->vma->vm_end)
		return VM_FAULT_SIGBUS;

	pgoff = linear_page_index(vmf->vma, pud_addr);
	phys = pgoff_to_phys(dax_dev, pgoff, PUD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
		dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
				pgoff);
		return VM_FAULT_SIGBUS;
	}

@@ -96,7 +96,7 @@ static int altr_a10sr_gpio_probe(struct platform_device *pdev)
	gpio->regmap = a10sr->regmap;

	gpio->gp = altr_a10sr_gc;

	gpio->gp.parent = pdev->dev.parent;
	gpio->gp.of_node = pdev->dev.of_node;

	ret = devm_gpiochip_add_data(&pdev->dev, &gpio->gp, gpio);

@@ -90,21 +90,18 @@ static int altera_gpio_irq_set_type(struct irq_data *d,

	altera_gc = gpiochip_get_data(irq_data_get_irq_chip_data(d));

	if (type == IRQ_TYPE_NONE)
	if (type == IRQ_TYPE_NONE) {
		irq_set_handler_locked(d, handle_bad_irq);
		return 0;
	if (type == IRQ_TYPE_LEVEL_HIGH &&
	    altera_gc->interrupt_trigger == IRQ_TYPE_LEVEL_HIGH)
	}
	if (type == altera_gc->interrupt_trigger) {
		if (type == IRQ_TYPE_LEVEL_HIGH)
			irq_set_handler_locked(d, handle_level_irq);
		else
			irq_set_handler_locked(d, handle_simple_irq);
		return 0;
	if (type == IRQ_TYPE_EDGE_RISING &&
	    altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_RISING)
		return 0;
	if (type == IRQ_TYPE_EDGE_FALLING &&
	    altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_FALLING)
		return 0;
	if (type == IRQ_TYPE_EDGE_BOTH &&
	    altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_BOTH)
		return 0;

	}
	irq_set_handler_locked(d, handle_bad_irq);
	return -EINVAL;
}


@@ -230,7 +227,6 @@ static void altera_gpio_irq_edge_handler(struct irq_desc *desc)
	chained_irq_exit(chip, desc);
}


static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc)
{
	struct altera_gpio_chip *altera_gc;

@@ -310,7 +306,7 @@ static int altera_gpio_probe(struct platform_device *pdev)
	altera_gc->interrupt_trigger = reg;

	ret = gpiochip_irqchip_add(&altera_gc->mmchip.gc, &altera_irq_chip, 0,
				   handle_simple_irq, IRQ_TYPE_NONE);
				   handle_bad_irq, IRQ_TYPE_NONE);

	if (ret) {
		dev_err(&pdev->dev, "could not add irqchip\n");

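The altera_gpio_irq_set_type() rewrite above collapses the per-type ladder into a single comparison against the one trigger the IP was synthesized with, and parks unmatched requests on handle_bad_irq(). A minimal sketch of that shape, with demo_irq_set_type() and its hw_trigger parameter as hypothetical stand-ins for the driver's state:

#include <linux/irq.h>

static int demo_irq_set_type(struct irq_data *d, unsigned int type,
			     unsigned int hw_trigger)
{
	if (type == IRQ_TYPE_NONE) {
		irq_set_handler_locked(d, handle_bad_irq);
		return 0;
	}
	if (type == hw_trigger) {
		/* level IRQs get a level flow handler, edges a simple one */
		if (type == IRQ_TYPE_LEVEL_HIGH)
			irq_set_handler_locked(d, handle_level_irq);
		else
			irq_set_handler_locked(d, handle_simple_irq);
		return 0;
	}
	irq_set_handler_locked(d, handle_bad_irq);
	return -EINVAL;
}
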
@@ -270,8 +270,10 @@ mcp23s08_direction_output(struct gpio_chip *chip, unsigned offset, int value)
static irqreturn_t mcp23s08_irq(int irq, void *data)
{
	struct mcp23s08 *mcp = data;
	int intcap, intf, i;
	int intcap, intf, i, gpio, gpio_orig, intcap_mask;
	unsigned int child_irq;
	bool intf_set, intcap_changed, gpio_bit_changed,
		defval_changed, gpio_set;

	mutex_lock(&mcp->lock);
	if (mcp_read(mcp, MCP_INTF, &intf) < 0) {

@@ -287,14 +289,67 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
	}

	mcp->cache[MCP_INTCAP] = intcap;

	/* This clears the interrupt (configurable on S18) */
	if (mcp_read(mcp, MCP_GPIO, &gpio) < 0) {
		mutex_unlock(&mcp->lock);
		return IRQ_HANDLED;
	}
	gpio_orig = mcp->cache[MCP_GPIO];
	mcp->cache[MCP_GPIO] = gpio;
	mutex_unlock(&mcp->lock);

	if (mcp->cache[MCP_INTF] == 0) {
		/* There is no interrupt pending */
		return IRQ_HANDLED;
	}

	dev_dbg(mcp->chip.parent,
		"intcap 0x%04X intf 0x%04X gpio_orig 0x%04X gpio 0x%04X\n",
		intcap, intf, gpio_orig, gpio);

	for (i = 0; i < mcp->chip.ngpio; i++) {
		if ((BIT(i) & mcp->cache[MCP_INTF]) &&
		    ((BIT(i) & intcap & mcp->irq_rise) ||
		     (mcp->irq_fall & ~intcap & BIT(i)) ||
		     (BIT(i) & mcp->cache[MCP_INTCON]))) {
		/* We must check all of the inputs on the chip,
		 * otherwise we may not notice a change on >=2 pins.
		 *
		 * On at least the mcp23s17, INTCAP is only updated
		 * one byte at a time (INTCAPA and INTCAPB are
		 * not written to at the same time - only on a per-bank
		 * basis).
		 *
		 * INTF only contains the single bit that caused the
		 * interrupt per-bank. On the mcp23s17, there is
		 * INTFA and INTFB. If two pins are changed on the A
		 * side at the same time, INTF will only have one bit
		 * set. If one pin on the A side and one pin on the B
		 * side are changed at the same time, INTF will have
		 * two bits set. Thus, INTF can't be the only check
		 * to see if the input has changed.
		 */

		intf_set = BIT(i) & mcp->cache[MCP_INTF];
		if (i < 8 && intf_set)
			intcap_mask = 0x00FF;
		else if (i >= 8 && intf_set)
			intcap_mask = 0xFF00;
		else
			intcap_mask = 0x00;

		intcap_changed = (intcap_mask &
			(BIT(i) & mcp->cache[MCP_INTCAP])) !=
			(intcap_mask & (BIT(i) & gpio_orig));
		gpio_set = BIT(i) & mcp->cache[MCP_GPIO];
		gpio_bit_changed = (BIT(i) & gpio_orig) !=
			(BIT(i) & mcp->cache[MCP_GPIO]);
		defval_changed = (BIT(i) & mcp->cache[MCP_INTCON]) &&
			((BIT(i) & mcp->cache[MCP_GPIO]) !=
			(BIT(i) & mcp->cache[MCP_DEFVAL]));

		if (((gpio_bit_changed || intcap_changed) &&
		    (BIT(i) & mcp->irq_rise) && gpio_set) ||
		    ((gpio_bit_changed || intcap_changed) &&
		    (BIT(i) & mcp->irq_fall) && !gpio_set) ||
		    defval_changed) {
			child_irq = irq_find_mapping(mcp->chip.irqdomain, i);
			handle_nested_irq(child_irq);
		}

@@ -197,7 +197,7 @@ static ssize_t gpio_mockup_event_write(struct file *file,
	struct seq_file *sfile;
	struct gpio_desc *desc;
	struct gpio_chip *gc;
	int status, val;
	int val;
	char buf;

	sfile = file->private_data;

@@ -206,9 +206,8 @@ static ssize_t gpio_mockup_event_write(struct file *file,
	chip = priv->chip;
	gc = &chip->gc;

	status = copy_from_user(&buf, usr_buf, 1);
	if (status)
		return status;
	if (copy_from_user(&buf, usr_buf, 1))
		return -EFAULT;

	if (buf == '0')
		val = 0;

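The gpio-mockup hunk above fixes a classic uaccess mistake: copy_from_user() returns the number of bytes it could not copy, which is not an errno and must never be handed back to userspace directly. A minimal sketch of the convention, with demo_write() as a hypothetical debugfs write handler:

#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t demo_write(struct file *file, const char __user *usr_buf,
			  size_t size, loff_t *ppos)
{
	char buf;

	/* nonzero means "bytes not copied", so translate it to -EFAULT */
	if (copy_from_user(&buf, usr_buf, 1))
		return -EFAULT;

	/* ... act on buf ... */
	return size;
}
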
@@ -42,9 +42,7 @@ struct xgene_gpio {
	struct gpio_chip chip;
	void __iomem *base;
	spinlock_t lock;
#ifdef CONFIG_PM
	u32 set_dr_val[XGENE_MAX_GPIO_BANKS];
#endif
};

static int xgene_gpio_get(struct gpio_chip *gc, unsigned int offset)

@@ -138,8 +136,7 @@ static int xgene_gpio_dir_out(struct gpio_chip *gc,
	return 0;
}

#ifdef CONFIG_PM
static int xgene_gpio_suspend(struct device *dev)
static __maybe_unused int xgene_gpio_suspend(struct device *dev)
{
	struct xgene_gpio *gpio = dev_get_drvdata(dev);
	unsigned long bank_offset;

@@ -152,7 +149,7 @@ static int xgene_gpio_suspend(struct device *dev)
	return 0;
}

static int xgene_gpio_resume(struct device *dev)
static __maybe_unused int xgene_gpio_resume(struct device *dev)
{
	struct xgene_gpio *gpio = dev_get_drvdata(dev);
	unsigned long bank_offset;

@@ -166,10 +163,6 @@ static int xgene_gpio_resume(struct device *dev)
}

static SIMPLE_DEV_PM_OPS(xgene_gpio_pm, xgene_gpio_suspend, xgene_gpio_resume);
#define XGENE_GPIO_PM_OPS	(&xgene_gpio_pm)
#else
#define XGENE_GPIO_PM_OPS	NULL
#endif

static int xgene_gpio_probe(struct platform_device *pdev)
{

@@ -241,7 +234,7 @@ static struct platform_driver xgene_gpio_driver = {
		.name = "xgene-gpio",
		.of_match_table = xgene_gpio_of_match,
		.acpi_match_table = ACPI_PTR(xgene_gpio_acpi_match),
		.pm = XGENE_GPIO_PM_OPS,
		.pm = &xgene_gpio_pm,
	},
	.probe = xgene_gpio_probe,
};

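The xgene hunks above swap the #ifdef CONFIG_PM scaffolding for the __maybe_unused idiom: SIMPLE_DEV_PM_OPS() always defines the ops structure, the callbacks compile away when unreferenced, and .pm can point at the ops unconditionally. A minimal sketch with hypothetical demo_* names:

#include <linux/platform_device.h>
#include <linux/pm.h>

static __maybe_unused int demo_suspend(struct device *dev)
{
	return 0;	/* save controller state here */
}

static __maybe_unused int demo_resume(struct device *dev)
{
	return 0;	/* restore controller state here */
}

static SIMPLE_DEV_PM_OPS(demo_pm, demo_suspend, demo_resume);

static struct platform_driver demo_driver = {
	.driver = {
		.name = "demo",
		.pm = &demo_pm,	/* valid even when CONFIG_PM_SLEEP=n */
	},
};
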
@@ -3,6 +3,4 @@
# of AMDSOC/AMDGPU drm driver.
# It provides the HW control for ACP related functionalities.

subdir-ccflags-y += -I$(AMDACPPATH)/ -I$(AMDACPPATH)/include

AMD_ACP_FILES := $(AMDACPPATH)/acp_hw.o

@@ -240,6 +240,8 @@ free_partial_kdata:
	for (; i >= 0; i--)
		drm_free_large(p->chunks[i].kdata);
	kfree(p->chunks);
	p->chunks = NULL;
	p->nchunks = 0;
put_ctx:
	amdgpu_ctx_put(p->ctx);
free_chunk:

@@ -2590,7 +2590,7 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
		use_bank = 0;
	}

	*pos &= 0x3FFFF;
	*pos &= (1UL << 22) - 1;

	if (use_bank) {
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||

@@ -2666,7 +2666,7 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
		use_bank = 0;
	}

	*pos &= 0x3FFFF;
	*pos &= (1UL << 22) - 1;

	if (use_bank) {
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||

@@ -3464,6 +3464,12 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
		    (adev->pdev->device == 0x6667)) {
			max_sclk = 75000;
		}
	} else if (adev->asic_type == CHIP_OLAND) {
		if ((adev->pdev->device == 0x6604) &&
		    (adev->pdev->subsystem_vendor == 0x1028) &&
		    (adev->pdev->subsystem_device == 0x066F)) {
			max_sclk = 75000;
		}
	}

	if (rps->vce_active) {

@@ -1051,7 +1051,7 @@ static int vi_common_early_init(void *handle)
		/* rev0 hardware requires workarounds to support PG */
		adev->pg_flags = 0;
		if (adev->rev_id != 0x00) {
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
			adev->pg_flags |=
				AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_GFX_PIPELINE |
				AMD_PG_SUPPORT_CP |

@@ -178,7 +178,7 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
	if (bgate) {
		cgs_set_powergating_state(hwmgr->device,
					  AMD_IP_BLOCK_TYPE_VCE,
					  AMD_PG_STATE_UNGATE);
					  AMD_PG_STATE_GATE);
		cgs_set_clockgating_state(hwmgr->device,
					  AMD_IP_BLOCK_TYPE_VCE,
					  AMD_CG_STATE_GATE);

@@ -63,8 +63,7 @@ static void malidp_crtc_enable(struct drm_crtc *crtc)

	clk_prepare_enable(hwdev->pxlclk);

	/* mclk needs to be set to the same or higher rate than pxlclk */
	clk_set_rate(hwdev->mclk, crtc->state->adjusted_mode.crtc_clock * 1000);
	/* We rely on firmware to set mclk to a sensible level. */
	clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000);

	hwdev->modeset(hwdev, &vm);

@@ -83,7 +83,7 @@ static const struct malidp_layer malidp550_layers[] = {
	{ DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
	{ DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE, MALIDP_DE_LG_STRIDE },
	{ DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
	{ DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, 0 },
	{ DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, MALIDP550_DE_LS_R1_STRIDE },
};

#define MALIDP_DE_DEFAULT_PREFETCH_START	5

@@ -37,6 +37,8 @@
#define   LAYER_V_VAL(x)		(((x) & 0x1fff) << 16)
#define MALIDP_LAYER_COMP_SIZE		0x010
#define MALIDP_LAYER_OFFSET		0x014
#define MALIDP550_LS_ENABLE		0x01c
#define MALIDP550_LS_R1_IN_SIZE		0x020

/*
 * This 4-entry look-up-table is used to determine the full 8-bit alpha value

@@ -242,6 +244,11 @@ static void malidp_de_plane_update(struct drm_plane *plane,
			LAYER_V_VAL(plane->state->crtc_y),
			mp->layer->base + MALIDP_LAYER_OFFSET);

	if (mp->layer->id == DE_SMART)
		malidp_hw_write(mp->hwdev,
				LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h),
				mp->layer->base + MALIDP550_LS_R1_IN_SIZE);

	/* first clear the rotation bits */
	val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL);
	val &= ~LAYER_ROT_MASK;

@@ -330,9 +337,16 @@ int malidp_de_planes_init(struct drm_device *drm)
		plane->hwdev = malidp->dev;
		plane->layer = &map->layers[i];

		/* Skip the features which the SMART layer doesn't have */
		if (id == DE_SMART)
		if (id == DE_SMART) {
			/*
			 * Enable the first rectangle in the SMART layer to be
			 * able to use it as a drm plane.
			 */
			malidp_hw_write(malidp->dev, 1,
					plane->layer->base + MALIDP550_LS_ENABLE);
			/* Skip the features which the SMART layer doesn't have. */
			continue;
		}

		drm_plane_create_rotation_property(&plane->base, DRM_ROTATE_0, flags);
		malidp_hw_write(malidp->dev, MALIDP_ALPHA_LUT,

@@ -84,6 +84,7 @@
/* Stride register offsets relative to Lx_BASE */
#define MALIDP_DE_LG_STRIDE		0x18
#define MALIDP_DE_LV_STRIDE0		0x18
#define MALIDP550_DE_LS_R1_STRIDE	0x28

/* macros to set values into registers */
#define MALIDP_DE_H_FRONTPORCH(x)	(((x) & 0xfff) << 0)

@@ -293,6 +293,7 @@ enum plane_id {
	PLANE_PRIMARY,
	PLANE_SPRITE0,
	PLANE_SPRITE1,
	PLANE_SPRITE2,
	PLANE_CURSOR,
	I915_MAX_PLANES,
};

@@ -1434,6 +1434,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -ENODEV;
	if (obj->ops->pwrite)
		ret = obj->ops->pwrite(obj, args);
	if (ret != -ENODEV)
		goto err;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,

@@ -2119,6 +2125,7 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->mm.madv = __I915_MADV_PURGED;
	obj->mm.pages = ERR_PTR(-EFAULT);
}

/* Try to discard unwanted pages */

@@ -2218,7 +2225,9 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,

	__i915_gem_object_reset_page_iter(obj);

	obj->ops->put_pages(obj, pages);
	if (!IS_ERR(pages))
		obj->ops->put_pages(obj, pages);

unlock:
	mutex_unlock(&obj->mm.lock);
}

@@ -2437,7 +2446,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
	if (err)
		return err;

	if (unlikely(!obj->mm.pages)) {
	if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
		err = ____i915_gem_object_get_pages(obj);
		if (err)
			goto unlock;

@@ -2515,7 +2524,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,

	pinned = true;
	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(!obj->mm.pages)) {
		if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
			ret = ____i915_gem_object_get_pages(obj);
			if (ret)
				goto err_unlock;

@@ -2563,6 +2572,75 @@ err_unlock:
	goto out_unlock;
}

static int
i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
			   const struct drm_i915_gem_pwrite *arg)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	char __user *user_data = u64_to_user_ptr(arg->data_ptr);
	u64 remain, offset;
	unsigned int pg;

	/* Before we instantiate/pin the backing store for our use, we
	 * can prepopulate the shmemfs filp efficiently using a write into
	 * the pagecache. We avoid the penalty of instantiating all the
	 * pages, important if the user is just writing to a few and never
	 * uses the object on the GPU, and using a direct write into shmemfs
	 * allows it to avoid the cost of retrieving a page (either swapin
	 * or clearing-before-use) before it is overwritten.
	 */
	if (READ_ONCE(obj->mm.pages))
		return -ENODEV;

	/* Before the pages are instantiated the object is treated as being
	 * in the CPU domain. The pages will be clflushed as required before
	 * use, and we can freely write into the pages directly. If userspace
	 * races pwrite with any other operation, corruption will ensue -
	 * that is userspace's prerogative!
	 */

	remain = arg->size;
	offset = arg->offset;
	pg = offset_in_page(offset);

	do {
		unsigned int len, unwritten;
		struct page *page;
		void *data, *vaddr;
		int err;

		len = PAGE_SIZE - pg;
		if (len > remain)
			len = remain;

		err = pagecache_write_begin(obj->base.filp, mapping,
					    offset, len, 0,
					    &page, &data);
		if (err < 0)
			return err;

		vaddr = kmap(page);
		unwritten = copy_from_user(vaddr + pg, user_data, len);
		kunmap(page);

		err = pagecache_write_end(obj->base.filp, mapping,
					  offset, len, len - unwritten,
					  page, data);
		if (err < 0)
			return err;

		if (unwritten)
			return -EFAULT;

		remain -= len;
		user_data += len;
		offset += len;
		pg = 0;
	} while (remain);

	return 0;
}

static bool ban_context(const struct i915_gem_context *ctx)
{
	return (i915_gem_context_is_bannable(ctx) &&

@@ -3029,6 +3107,16 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
		args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
		if (args->timeout_ns < 0)
			args->timeout_ns = 0;

		/*
		 * Apparently ktime isn't accurate enough and occasionally has a
		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
		 * things up to make the test happy. We allow up to 1 jiffy.
		 *
		 * This is a regression from the timespec->ktime conversion.
		 */
		if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
			args->timeout_ns = 0;
	}

	i915_gem_object_put(obj);

@@ -3974,8 +4062,11 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,

	.get_pages = i915_gem_object_get_pages_gtt,
	.put_pages = i915_gem_object_put_pages_gtt,

	.pwrite = i915_gem_object_pwrite_gtt,
};

struct drm_i915_gem_object *

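Several of the i915_gem.c hunks above hinge on the ERR_PTR sentinel pattern: a single pointer field distinguishes "not allocated" (NULL), "allocated" (a valid pointer), and "backing store lost" (an encoded errno), and readers test it with IS_ERR_OR_NULL() before dereferencing. A minimal sketch with a hypothetical demo_obj:

#include <linux/err.h>
#include <linux/scatterlist.h>

struct demo_obj {
	struct sg_table *pages;	/* NULL, valid, or ERR_PTR(-EFAULT) */
};

static void demo_purge(struct demo_obj *obj)
{
	/* record that the contents are gone, not merely unallocated */
	obj->pages = ERR_PTR(-EFAULT);
}

static bool demo_has_pages(const struct demo_obj *obj)
{
	return !IS_ERR_OR_NULL(obj->pages);
}
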
@@ -293,12 +293,12 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
		 * those as well to make room for our guard pages.
		 */
		if (check_color) {
			if (vma->node.start + vma->node.size == node->start) {
				if (vma->node.color == node->color)
			if (node->start + node->size == target->start) {
				if (node->color == target->color)
					continue;
			}
			if (vma->node.start == node->start + node->size) {
				if (vma->node.color == node->color)
			if (node->start == target->start + target->size) {
				if (node->color == target->color)
					continue;
			}
		}

@@ -54,6 +54,9 @@ struct drm_i915_gem_object_ops {
	struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);

	int (*pwrite)(struct drm_i915_gem_object *,
		      const struct drm_i915_gem_pwrite *);

	int (*dmabuf_export)(struct drm_i915_gem_object *);
	void (*release)(struct drm_i915_gem_object *);
};

@@ -512,10 +512,36 @@ err_unpin:
	return ret;
}

static void
i915_vma_remove(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	drm_mm_remove_node(&vma->node);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);

	/* Since the unbound list is global, only move to that list if
	 * no more VMAs exist.
	 */
	if (--obj->bind_count == 0)
		list_move_tail(&obj->global_link,
			       &to_i915(obj->base.dev)->mm.unbound_list);

	/* And finally now the object is completely decoupled from this vma,
	 * we can drop its hold on the backing storage and allow it to be
	 * reaped by the shrinker.
	 */
	i915_gem_object_unpin_pages(obj);
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
}

int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags)
{
	unsigned int bound = vma->flags;
	const unsigned int bound = vma->flags;
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

@@ -524,18 +550,18 @@ int __i915_vma_do_pin(struct i915_vma *vma,

	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
		ret = -EBUSY;
		goto err;
		goto err_unpin;
	}

	if ((bound & I915_VMA_BIND_MASK) == 0) {
		ret = i915_vma_insert(vma, size, alignment, flags);
		if (ret)
			goto err;
			goto err_unpin;
	}

	ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
	if (ret)
		goto err;
		goto err_remove;

	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
		__i915_vma_set_map_and_fenceable(vma);

@@ -544,7 +570,12 @@ int __i915_vma_do_pin(struct i915_vma *vma,
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
	return 0;

err:
err_remove:
	if ((bound & I915_VMA_BIND_MASK) == 0) {
		GEM_BUG_ON(vma->pages);
		i915_vma_remove(vma);
	}
err_unpin:
	__i915_vma_unpin(vma);
	return ret;
}

@@ -657,9 +688,6 @@ int i915_vma_unbind(struct i915_vma *vma)
	}
	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

	drm_mm_remove_node(&vma->node);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);

	if (vma->pages != obj->mm.pages) {
		GEM_BUG_ON(!vma->pages);
		sg_free_table(vma->pages);

@@ -667,18 +695,7 @@ int i915_vma_unbind(struct i915_vma *vma)
	}
	vma->pages = NULL;

	/* Since the unbound list is global, only move to that list if
	 * no more VMAs exist. */
	if (--obj->bind_count == 0)
		list_move_tail(&obj->global_link,
			       &to_i915(obj->base.dev)->mm.unbound_list);

	/* And finally now the object is completely decoupled from this vma,
	 * we can drop its hold on the backing storage and allow it to be
	 * reaped by the shrinker.
	 */
	i915_gem_object_unpin_pages(obj);
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
	i915_vma_remove(vma);

destroy:
	if (unlikely(i915_vma_is_closed(vma)))

@@ -3669,10 +3669,6 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
	crtc->base.mode = crtc->base.state->mode;

	DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
		      old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit

@@ -4796,23 +4792,17 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
	struct intel_crtc_scaler_state *scaler_state =
		&crtc->config->scaler_state;

	DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);

	if (crtc->config->pch_pfit.enabled) {
		int id;

		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
			DRM_ERROR("Requesting pfit without getting a scaler first\n");
		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
			return;
		}

		id = scaler_state->scaler_id;
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);

		DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
	}
}


@@ -14379,6 +14369,24 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
	} while (progress);
}

static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
{
	struct intel_atomic_state *state, *next;
	struct llist_node *freed;

	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
	llist_for_each_entry_safe(state, next, freed, freed)
		drm_atomic_state_put(&state->base);
}

static void intel_atomic_helper_free_state_worker(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), atomic_helper.free_work);

	intel_atomic_helper_free_state(dev_priv);
}

static void intel_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;

@@ -14545,6 +14553,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
	 * can happen also when the device is completely off.
	 */
	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);

	intel_atomic_helper_free_state(dev_priv);
}

static void intel_atomic_commit_work(struct work_struct *work)

@@ -14946,17 +14956,19 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
		to_intel_atomic_state(old_crtc_state->state);
	bool modeset = needs_modeset(crtc->state);

	if (!modeset &&
	    (intel_cstate->base.color_mgmt_changed ||
	     intel_cstate->update_pipe)) {
		intel_color_set_csc(crtc->state);
		intel_color_load_luts(crtc->state);
	}

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(intel_crtc);

	if (modeset)
		goto out;

	if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
		intel_color_set_csc(crtc->state);
		intel_color_load_luts(crtc->state);
	}

	if (intel_cstate->update_pipe)
		intel_update_pipe_config(intel_crtc, old_intel_cstate);
	else if (INTEL_GEN(dev_priv) >= 9)

@@ -16599,18 +16611,6 @@ fail:
	drm_modeset_acquire_fini(&ctx);
}

static void intel_atomic_helper_free_state(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), atomic_helper.free_work);
	struct intel_atomic_state *state, *next;
	struct llist_node *freed;

	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
	llist_for_each_entry_safe(state, next, freed, freed)
		drm_atomic_state_put(&state->base);
}

int intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

@@ -16631,7 +16631,7 @@ int intel_modeset_init(struct drm_device *dev)
	dev->mode_config.funcs = &intel_mode_funcs;

	INIT_WORK(&dev_priv->atomic_helper.free_work,
		  intel_atomic_helper_free_state);
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(dev);

@@ -357,14 +357,13 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
			      bool *enabled, int width, int height)
{
	struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
	unsigned long conn_configured, mask;
	unsigned long conn_configured, conn_seq, mask;
	unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
	int i, j;
	bool *save_enabled;
	bool fallback = true;
	int num_connectors_enabled = 0;
	int num_connectors_detected = 0;
	int pass = 0;

	save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
	if (!save_enabled)

@@ -374,6 +373,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
	mask = BIT(count) - 1;
	conn_configured = 0;
retry:
	conn_seq = conn_configured;
	for (i = 0; i < count; i++) {
		struct drm_fb_helper_connector *fb_conn;
		struct drm_connector *connector;

@@ -387,7 +387,7 @@ retry:
		if (conn_configured & BIT(i))
			continue;

		if (pass == 0 && !connector->has_tile)
		if (conn_seq == 0 && !connector->has_tile)
			continue;

		if (connector->status == connector_status_connected)

@@ -498,10 +498,8 @@ retry:
		conn_configured |= BIT(i);
	}

	if ((conn_configured & mask) != mask) {
		pass++;
	if ((conn_configured & mask) != mask && conn_configured != conn_seq)
		goto retry;
	}

	/*
	 * If the BIOS didn't enable everything it could, fall back to have the

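The intel_fbdev change above replaces a pass counter with a convergence test: retry only while a pass configures at least one new connector. A sketch of the loop shape, abbreviated to just the control flow:

	unsigned long conn_configured = 0, conn_seq, mask = BIT(count) - 1;

retry:
	conn_seq = conn_configured;
	/* try each connector not yet configured; on success do
	 * conn_configured |= BIT(i); */
	if ((conn_configured & mask) != mask && conn_configured != conn_seq)
		goto retry;	/* made progress, another pass may finish */
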
@@ -4891,6 +4891,12 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
		break;
	}

	/* When byt can survive without system hang with dynamic
	 * sw freq adjustments, this restriction can be lifted.
	 */
	if (IS_VALLEYVIEW(dev_priv))
		goto skip_hw_write;

	I915_WRITE(GEN6_RP_UP_EI,
		   GT_INTERVAL_FROM_US(dev_priv, ei_up));
	I915_WRITE(GEN6_RP_UP_THRESHOLD,

@@ -4911,6 +4917,7 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

skip_hw_write:
	dev_priv->rps.power = new_power;
	dev_priv->rps.up_threshold = threshold_up;
	dev_priv->rps.down_threshold = threshold_down;

@@ -7916,10 +7923,10 @@ static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
 * @timeout_base_ms: timeout for polling with preemption enabled
 *
 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
 * reports an error or an overall timeout of @timeout_base_ms+10 ms expires.
 * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
 * The request is acknowledged once the PCODE reply dword equals @reply after
 * applying @reply_mask. Polling is first attempted with preemption enabled
 * for @timeout_base_ms and if this times out for another 10 ms with
 * for @timeout_base_ms and if this times out for another 50 ms with
 * preemption disabled.
 *
 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some

@@ -7955,14 +7962,15 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
	 * worst case) _and_ PCODE was busy for some reason even after a
	 * (queued) request and @timeout_base_ms delay. As a workaround retry
	 * the poll with preemption disabled to maximize the number of
	 * requests. Increase the timeout from @timeout_base_ms to 10ms to
	 * requests. Increase the timeout from @timeout_base_ms to 50ms to
	 * account for interrupts that could reduce the number of these
	 * requests.
	 * requests, and for any quirks of the PCODE firmware that delay
	 * the request completion.
	 */
	DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
	WARN_ON_ONCE(timeout_base_ms > 3);
	preempt_disable();
	ret = wait_for_atomic(COND, 10);
	ret = wait_for_atomic(COND, 50);
	preempt_enable();

out:

|
|||
int scaler_id = plane_state->scaler_id;
|
||||
const struct intel_scaler *scaler;
|
||||
|
||||
DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n",
|
||||
plane_id, PS_PLANE_SEL(plane_id));
|
||||
|
||||
scaler = &crtc_state->scaler_state.scalers[scaler_id];
|
||||
|
||||
I915_WRITE(SKL_PS_CTRL(pipe, scaler_id),
|
||||
|
|
|
@ -119,6 +119,8 @@ fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_doma
|
|||
|
||||
for_each_fw_domain_masked(d, fw_domains, dev_priv)
|
||||
fw_domain_wait_ack(d);
|
||||
|
||||
dev_priv->uncore.fw_domains_active |= fw_domains;
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -130,6 +132,8 @@ fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_doma
|
|||
fw_domain_put(d);
|
||||
fw_domain_posting_read(d);
|
||||
}
|
||||
|
||||
dev_priv->uncore.fw_domains_active &= ~fw_domains;
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -240,10 +244,8 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
|
|||
if (WARN_ON(domain->wake_count == 0))
|
||||
domain->wake_count++;
|
||||
|
||||
if (--domain->wake_count == 0) {
|
||||
if (--domain->wake_count == 0)
|
||||
dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
|
||||
dev_priv->uncore.fw_domains_active &= ~domain->mask;
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
|
||||
|
||||
|
@ -454,10 +456,8 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
|
|||
fw_domains &= ~domain->mask;
|
||||
}
|
||||
|
||||
if (fw_domains) {
|
||||
if (fw_domains)
|
||||
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
|
||||
dev_priv->uncore.fw_domains_active |= fw_domains;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -968,7 +968,6 @@ static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
|
|||
fw_domain_arm_timer(domain);
|
||||
|
||||
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
|
||||
dev_priv->uncore.fw_domains_active |= fw_domains;
|
||||
}
|
||||
|
||||
static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
|
||||
|
|
|
@@ -147,9 +147,6 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
	struct drm_gem_object *obj = buffer->priv;
	int ret = 0;

	if (WARN_ON(!obj->filp))
		return -EINVAL;

	ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
	if (ret < 0)
		return ret;

@@ -2984,6 +2984,12 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
		    (rdev->pdev->device == 0x6667)) {
			max_sclk = 75000;
		}
	} else if (rdev->family == CHIP_OLAND) {
		if ((rdev->pdev->device == 0x6604) &&
		    (rdev->pdev->subsystem_vendor == 0x1028) &&
		    (rdev->pdev->subsystem_device == 0x066F)) {
			max_sclk = 75000;
		}
	}

	if (rps->vce_active) {

@@ -464,6 +464,7 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	unsigned long flags;

	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
	mutex_lock(&tilcdc_crtc->enable_lock);

@@ -484,7 +485,17 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc)
	tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
			  LCDC_PALETTE_LOAD_MODE(DATA_ONLY),
			  LCDC_PALETTE_LOAD_MODE_MASK);

	/* There is no real chance for a race here as the time stamp
	 * is taken before the raster DMA is started. The spin-lock is
	 * taken to have a memory barrier after taking the time-stamp
	 * and to avoid a context switch between taking the stamp and
	 * enabling the raster.
	 */
	spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
	tilcdc_crtc->last_vblank = ktime_get();
	tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
	spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);

	drm_crtc_vblank_on(crtc);


@@ -539,7 +550,6 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
	}

	drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
	tilcdc_crtc->last_vblank = 0;

	tilcdc_crtc->enabled = false;
	mutex_unlock(&tilcdc_crtc->enable_lock);

@@ -602,7 +612,6 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));


@@ -614,28 +623,30 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
	drm_framebuffer_reference(fb);

	crtc->primary->fb = fb;
	tilcdc_crtc->event = event;

	spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
	mutex_lock(&tilcdc_crtc->enable_lock);

	if (crtc->hwmode.vrefresh && ktime_to_ns(tilcdc_crtc->last_vblank)) {
	if (tilcdc_crtc->enabled) {
		unsigned long flags;
		ktime_t next_vblank;
		s64 tdiff;

		next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
			1000000 / crtc->hwmode.vrefresh);
		spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);

		next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
					   1000000 / crtc->hwmode.vrefresh);
		tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));

		if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
			tilcdc_crtc->next_fb = fb;
		else
			set_scanout(crtc, fb);

		spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
	}

	if (tilcdc_crtc->next_fb != fb)
		set_scanout(crtc, fb);

	tilcdc_crtc->event = event;

	spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
	mutex_unlock(&tilcdc_crtc->enable_lock);

	return 0;
}

@@ -1036,5 +1047,5 @@ int tilcdc_crtc_create(struct drm_device *dev)

fail:
	tilcdc_crtc_destroy(crtc);
	return -ENOMEM;
	return ret;
}

@@ -175,11 +175,11 @@ config HID_CHERRY
	Support for Cherry Cymotion keyboard.

config HID_CHICONY
	tristate "Chicony Tactical pad"
	tristate "Chicony devices"
	depends on HID
	default !EXPERT
	---help---
	Support for Chicony Tactical pad.
	Support for Chicony Tactical pad and special keys on Chicony keyboards.

config HID_CORSAIR
	tristate "Corsair devices"

@@ -190,6 +190,7 @@ config HID_CORSAIR

	Supported devices:
	- Vengeance K90
	- Scimitar PRO RGB

config HID_PRODIKEYS
	tristate "Prodikeys PC-MIDI Keyboard support"

@@ -86,6 +86,7 @@ static const struct hid_device_id ch_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) },
	{ }
};
MODULE_DEVICE_TABLE(hid, ch_devices);

@@ -1870,6 +1870,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },

@@ -1910,6 +1911,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },

@@ -3,8 +3,10 @@
 *
 * Supported devices:
 *  - Vengeance K90 Keyboard
 *  - Scimitar PRO RGB Gaming Mouse
 *
 * Copyright (c) 2015 Clement Vuchener
 * Copyright (c) 2017 Oscar Campos
 */

/*

@@ -670,10 +672,51 @@ static int corsair_input_mapping(struct hid_device *dev,
	return 0;
}

/*
 * The report descriptor of the Corsair Scimitar RGB Pro gaming mouse is
 * not parseable as it defines two consecutive Logical Minimum items for
 * the Usage Page (Consumer) in rdesc bytes 75 and 77, byte 77 being 0x16
 * where it should obviously be 0x26 for a 16-bit Logical Maximum. This
 * prevents proper parsing of the report descriptor due to the Logical
 * Minimum being larger than the Logical Maximum.
 *
 * This driver fixes the report descriptor for:
 * - USB ID b1c:1b3e, sold as Scimitar RGB Pro Gaming mouse
 */

static __u8 *corsair_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
					unsigned int *rsize)
{
	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);

	if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
		/*
		 * The Corsair Scimitar RGB Pro report descriptor is broken and
		 * defines two different Logical Minimum items for the Consumer
		 * Application. Byte 77 should be 0x26, defining a 16-bit
		 * integer for the Logical Maximum, but it is 0x16
		 * instead (Logical Minimum).
		 */
		switch (hdev->product) {
		case USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB:
			if (*rsize >= 172 && rdesc[75] == 0x15 && rdesc[77] == 0x16
			    && rdesc[78] == 0xff && rdesc[79] == 0x0f) {
				hid_info(hdev, "Fixing up report descriptor\n");
				rdesc[77] = 0x26;
			}
			break;
		}

	}
	return rdesc;
}

static const struct hid_device_id corsair_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90),
		.driver_data = CORSAIR_USE_K90_MACRO |
			       CORSAIR_USE_K90_BACKLIGHT },
	{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR,
			 USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) },
	{}
};


@@ -686,10 +729,14 @@ static struct hid_driver corsair_driver = {
	.event = corsair_event,
	.remove = corsair_remove,
	.input_mapping = corsair_input_mapping,
	.report_fixup = corsair_mouse_report_fixup,
};

module_hid_driver(corsair_driver);

MODULE_LICENSE("GPL");
/* Original K90 driver author */
MODULE_AUTHOR("Clement Vuchener");
/* Scimitar PRO RGB driver author */
MODULE_AUTHOR("Oscar Campos");
MODULE_DESCRIPTION("HID driver for Corsair devices");

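For reference on the byte values in the fixup above: in a HID short item the prefix byte encodes tag, type, and data size, so 0x15/0x16 are Logical Minimum with one/two data bytes and 0x25/0x26 are Logical Maximum with one/two data bytes. Rewriting byte 77 in place therefore retags the stray item without shifting the descriptor:

	/* 0x16 0xff 0x0f = Logical Minimum, 2 bytes, 0x0fff (bogus);
	 * 0x26 0xff 0x0f = Logical Maximum, 2 bytes, 0x0fff (intended) */
	if (*rsize >= 172 && rdesc[75] == 0x15 && rdesc[77] == 0x16 &&
	    rdesc[78] == 0xff && rdesc[79] == 0x0f)
		rdesc[77] = 0x26;
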
@@ -278,6 +278,9 @@
#define USB_DEVICE_ID_CORSAIR_K70RGB	0x1b13
#define USB_DEVICE_ID_CORSAIR_STRAFE	0x1b15
#define USB_DEVICE_ID_CORSAIR_K65RGB	0x1b17
#define USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE	0x1b38
#define USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE	0x1b39
#define USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB	0x1b3e

#define USB_VENDOR_ID_CREATIVELABS	0x041e
#define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51	0x322c

@@ -557,6 +560,7 @@

#define USB_VENDOR_ID_JESS	0x0c45
#define USB_DEVICE_ID_JESS_YUREX	0x1010
#define USB_DEVICE_ID_JESS_ZEN_AIO_KBD	0x5112

#define USB_VENDOR_ID_JESS2	0x0f30
#define USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD	0x0111

@@ -2632,6 +2632,8 @@ err_stop:
		sony_leds_remove(sc);
	if (sc->quirks & SONY_BATTERY_SUPPORT)
		sony_battery_remove(sc);
	if (sc->touchpad)
		sony_unregister_touchpad(sc);
	sony_cancel_work_sync(sc);
	kfree(sc->output_report_dmabuf);
	sony_remove_dev_list(sc);

@@ -80,6 +80,9 @@ static const struct hid_blacklist {
	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB, HID_QUIRK_NO_INIT_REPORTS },
	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB, HID_QUIRK_NO_INIT_REPORTS },
	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
	{ USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
	{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
	{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },

@@ -2579,7 +2579,9 @@ static void wacom_remove(struct hid_device *hdev)

	/* make sure we don't trigger the LEDs */
	wacom_led_groups_release(wacom);
	wacom_release_resources(wacom);

	if (wacom->wacom_wac.features.type != REMOTE)
		wacom_release_resources(wacom);

	hid_set_drvdata(hdev, NULL);
}

@@ -1959,8 +1959,10 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
		input_set_capability(input, EV_KEY, BTN_TOOL_BRUSH);
		input_set_capability(input, EV_KEY, BTN_TOOL_PENCIL);
		input_set_capability(input, EV_KEY, BTN_TOOL_AIRBRUSH);
		input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE);
		input_set_capability(input, EV_KEY, BTN_TOOL_LENS);
		if (!(features->device_type & WACOM_DEVICETYPE_DIRECT)) {
			input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE);
			input_set_capability(input, EV_KEY, BTN_TOOL_LENS);
		}
		break;
	case WACOM_HID_WD_FINGERWHEEL:
		wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0);

@@ -4197,10 +4199,10 @@ static const struct wacom_features wacom_features_0x343 =
	  WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
static const struct wacom_features wacom_features_0x360 =
	{ "Wacom Intuos Pro M", 44800, 29600, 8191, 63,
	  INTUOSP2_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 9, .touch_max = 10 };
	  INTUOSP2_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 10 };
static const struct wacom_features wacom_features_0x361 =
	{ "Wacom Intuos Pro L", 62200, 43200, 8191, 63,
	  INTUOSP2_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 9, .touch_max = 10 };
	  INTUOSP2_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 10 };

static const struct wacom_features wacom_features_HID_ANY_ID =
	{ "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID };

@@ -989,26 +989,29 @@ static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule)
 	struct dm_offload *o = container_of(cb, struct dm_offload, cb);
 	struct bio_list list;
 	struct bio *bio;
+	int i;
 
 	INIT_LIST_HEAD(&o->cb.list);
 
 	if (unlikely(!current->bio_list))
 		return;
 
-	list = *current->bio_list;
-	bio_list_init(current->bio_list);
-
-	while ((bio = bio_list_pop(&list))) {
-		struct bio_set *bs = bio->bi_pool;
-		if (unlikely(!bs) || bs == fs_bio_set) {
-			bio_list_add(current->bio_list, bio);
-			continue;
+	for (i = 0; i < 2; i++) {
+		list = current->bio_list[i];
+		bio_list_init(&current->bio_list[i]);
+
+		while ((bio = bio_list_pop(&list))) {
+			struct bio_set *bs = bio->bi_pool;
+			if (unlikely(!bs) || bs == fs_bio_set) {
+				bio_list_add(&current->bio_list[i], bio);
+				continue;
+			}
+
+			spin_lock(&bs->rescue_lock);
+			bio_list_add(&bs->rescue_list, bio);
+			queue_work(bs->rescue_workqueue, &bs->rescue_work);
+			spin_unlock(&bs->rescue_lock);
 		}
-
-		spin_lock(&bs->rescue_lock);
-		bio_list_add(&bs->rescue_list, bio);
-		queue_work(bs->rescue_workqueue, &bs->rescue_work);
-		spin_unlock(&bs->rescue_lock);
 	}
 }

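The driver-level consequence of the block-core change above is that current->bio_list became a two-element array (one list for bios queued by ->make_request_fn, one for bios resubmitted through generic_make_request), so any code that drains it must walk both entries. A minimal userspace sketch of that drain loop; the bio and bio_list types below are stand-ins written for the sketch, not the kernel's:

#include <stdio.h>
#include <stdlib.h>

struct bio { int id; struct bio *next; };
struct bio_list { struct bio *head, *tail; };

static void bio_list_init(struct bio_list *bl) { bl->head = bl->tail = NULL; }

static void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->next = NULL;
	if (bl->tail)
		bl->tail->next = bio;
	else
		bl->head = bio;
	bl->tail = bio;
}

static struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bio->next;
		if (!bl->head)
			bl->tail = NULL;
	}
	return bio;
}

int main(void)
{
	struct bio_list pending[2];	/* models current->bio_list[2] */
	struct bio a = { 1 }, b = { 2 };
	int i;

	bio_list_init(&pending[0]);
	bio_list_init(&pending[1]);
	bio_list_add(&pending[0], &a);
	bio_list_add(&pending[1], &b);

	/* Drain BOTH lists, as the fixed flush_current_bio_list() must */
	for (i = 0; i < 2; i++) {
		struct bio_list list = pending[i];
		struct bio *bio;

		bio_list_init(&pending[i]);
		while ((bio = bio_list_pop(&list)))
			printf("dispatch bio %d from list %d\n", bio->id, i);
	}
	return 0;
}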
@@ -777,7 +777,6 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
 		bm_lockres->flags |= DLM_LKF_NOQUEUE;
 		ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
 		if (ret == -EAGAIN) {
-			memset(bm_lockres->lksb.sb_lvbptr, '\0', LVB_SIZE);
 			s = read_resync_info(mddev, bm_lockres);
 			if (s) {
 				pr_info("%s:%d Resync[%llu..%llu] in progress on %d\n",

@@ -974,6 +973,7 @@ static int leave(struct mddev *mddev)
 	lockres_free(cinfo->bitmap_lockres);
+	unlock_all_bitmaps(mddev);
 	dlm_release_lockspace(cinfo->lockspace, 2);
 	kfree(cinfo);
 	return 0;
 }

@@ -440,14 +440,6 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
 }
 EXPORT_SYMBOL(md_flush_request);
 
-void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
-{
-	struct mddev *mddev = cb->data;
-	md_wakeup_thread(mddev->thread);
-	kfree(cb);
-}
-EXPORT_SYMBOL(md_unplug);
-
 static inline struct mddev *mddev_get(struct mddev *mddev)
 {
 	atomic_inc(&mddev->active);

@@ -1887,7 +1879,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
 	}
 	sb = page_address(rdev->sb_page);
 	sb->data_size = cpu_to_le64(num_sectors);
-	sb->super_offset = rdev->sb_start;
+	sb->super_offset = cpu_to_le64(rdev->sb_start);
 	sb->sb_csum = calc_sb_1_csum(sb);
 	do {
 		md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,

@@ -2295,7 +2287,7 @@ static bool does_sb_need_changing(struct mddev *mddev)
 	/* Check if any mddev parameters have changed */
 	if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
 	    (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
-	    (mddev->layout != le64_to_cpu(sb->layout)) ||
+	    (mddev->layout != le32_to_cpu(sb->layout)) ||
 	    (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
 	    (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
 		return true;

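Both md fixes above are on-disk endianness corrections: super_offset is a little-endian 64-bit field, so a CPU-endian value must be converted with cpu_to_le64() before being stored, while layout is a 32-bit field, so reading it with le64_to_cpu() byte-swaps the wrong width and drags in neighbouring bytes. A userspace sketch of the two rules, using glibc's <endian.h> helpers in place of the kernel macros; the ondisk_sb struct is invented for illustration, it is not mdp_superblock_1:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct ondisk_sb {		/* illustrative layout only */
	uint64_t super_offset;	/* stored little-endian on disk */
	uint32_t layout;	/* 32-bit little-endian field */
};

int main(void)
{
	struct ondisk_sb sb;
	uint64_t sb_start = 8;	/* CPU-endian value from memory */

	/* Writing: convert to LE before storing, never store raw */
	sb.super_offset = htole64(sb_start);
	sb.layout = htole32(2);

	/* Reading: use the accessor matching the field width; a 64-bit
	 * read of a 32-bit field would also pull in the bytes next to it.
	 */
	printf("layout=%u super_offset=%llu\n",
	       le32toh(sb.layout),
	       (unsigned long long)le64toh(sb.super_offset));
	return 0;
}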
@@ -6458,11 +6450,10 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
 	mddev->layout = info->layout;
 	mddev->chunk_sectors = info->chunk_size >> 9;
 
-	mddev->max_disks = MD_SB_DISKS;
-
 	if (mddev->persistent) {
-		mddev->flags = 0;
-		mddev->sb_flags = 0;
+		mddev->max_disks = MD_SB_DISKS;
+		mddev->flags = 0;
+		mddev->sb_flags = 0;
 	}
 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);

@@ -6533,8 +6524,12 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
 			return -ENOSPC;
 	}
 	rv = mddev->pers->resize(mddev, num_sectors);
-	if (!rv)
-		revalidate_disk(mddev->gendisk);
+	if (!rv) {
+		if (mddev->queue) {
+			set_capacity(mddev->gendisk, mddev->array_sectors);
+			revalidate_disk(mddev->gendisk);
+		}
+	}
 	return rv;
 }

@@ -676,16 +676,10 @@ extern void mddev_resume(struct mddev *mddev);
 extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
 				   struct mddev *mddev);
 
-extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule);
 extern void md_reload_sb(struct mddev *mddev, int raid_disk);
 extern void md_update_sb(struct mddev *mddev, int force);
 extern void md_kick_rdev_from_array(struct md_rdev * rdev);
 struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
-static inline int mddev_check_plugged(struct mddev *mddev)
-{
-	return !!blk_check_plugged(md_unplug, mddev,
-				   sizeof(struct blk_plug_cb));
-}
 
 static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
 {

@@ -1027,7 +1027,7 @@ static int get_unqueued_pending(struct r1conf *conf)
 static void freeze_array(struct r1conf *conf, int extra)
 {
 	/* Stop sync I/O and normal I/O and wait for everything to
-	 * go quite.
+	 * go quiet.
 	 * This is called in two situations:
 	 * 1) management command handlers (reshape, remove disk, quiesce).
 	 * 2) one normal I/O request failed.

@@ -1587,9 +1587,30 @@ static void raid1_make_request(struct mddev *mddev, struct bio *bio)
 			split = bio;
 		}
 
-		if (bio_data_dir(split) == READ)
+		if (bio_data_dir(split) == READ) {
 			raid1_read_request(mddev, split);
-		else
+
+			/*
+			 * If a bio is split, the first part of bio will
+			 * pass barrier but the bio is queued in
+			 * current->bio_list (see generic_make_request). If
+			 * there is a raise_barrier() called here, the second
+			 * part of bio can't pass barrier. But since the first
+			 * part bio isn't dispatched to underlying disks yet,
+			 * the barrier is never released, hence raise_barrier
+			 * will always wait. We have a deadlock.
+			 * Note, this only happens in read path. For write
+			 * path, the first part of bio is dispatched in a
+			 * schedule() call (because of blk plug) or offloaded
+			 * to raid1d.
+			 * Quitting from the function immediately can change
+			 * the bio order queued in bio_list and avoid the deadlock.
+			 */
+			if (split != bio) {
+				generic_make_request(bio);
+				break;
+			}
+		} else
 			raid1_write_request(mddev, split);
 	} while (split != bio);
 }

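The comment added in this hunk describes the whole mechanism of the fix: instead of looping over the remainder of a split read (which would wait on a barrier that is never released), the function resubmits the remainder and returns, which reorders the queued bios. A toy userspace model of that requeue-instead-of-loop pattern; the FIFO, the bio struct and the 4-sector chunk limit are all invented for the sketch:

#include <stdio.h>

#define CHUNK 4

struct bio { int start, len; struct bio *next; };

static struct bio *queue_head, **queue_tail = &queue_head;

static void submit(struct bio *bio)
{
	bio->next = NULL;
	*queue_tail = bio;
	queue_tail = &bio->next;
}

static void make_request(struct bio *bio)
{
	int n = bio->len > CHUNK ? CHUNK : bio->len;

	printf("dispatch [%d..%d)\n", bio->start, bio->start + n);
	bio->start += n;
	bio->len -= n;
	if (bio->len)
		submit(bio);	/* requeue the remainder, do not loop */
}

int main(void)
{
	struct bio a = { 0, 10 }, b = { 100, 3 };

	submit(&a);
	submit(&b);
	while (queue_head) {
		struct bio *bio = queue_head;

		queue_head = bio->next;
		if (!queue_head)
			queue_tail = &queue_head;
		make_request(bio);
	}
	return 0;
}

Running it shows the remainder of the first bio being dispatched after the second bio, i.e. exactly the reordering the comment says breaks the deadlock.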
@@ -3246,8 +3267,6 @@ static int raid1_resize(struct mddev *mddev, sector_t sectors)
 		return ret;
 	}
 	md_set_array_sectors(mddev, newsize);
-	set_capacity(mddev->gendisk, mddev->array_sectors);
-	revalidate_disk(mddev->gendisk);
 	if (sectors > mddev->dev_sectors &&
 	    mddev->recovery_cp > mddev->dev_sectors) {
 		mddev->recovery_cp = mddev->dev_sectors;

@@ -974,7 +974,8 @@ static void wait_barrier(struct r10conf *conf)
 				    !conf->barrier ||
 				    (atomic_read(&conf->nr_pending) &&
 				     current->bio_list &&
-				     !bio_list_empty(current->bio_list)),
+				     (!bio_list_empty(&current->bio_list[0]) ||
+				      !bio_list_empty(&current->bio_list[1]))),
 				    conf->resync_lock);
 		conf->nr_waiting--;
 		if (!conf->nr_waiting)

@@ -1477,11 +1478,24 @@ retry_write:
 		mbio->bi_bdev = (void*)rdev;
 
 		atomic_inc(&r10_bio->remaining);
+
+		cb = blk_check_plugged(raid10_unplug, mddev,
+				       sizeof(*plug));
+		if (cb)
+			plug = container_of(cb, struct raid10_plug_cb,
+					    cb);
+		else
+			plug = NULL;
 		spin_lock_irqsave(&conf->device_lock, flags);
-		bio_list_add(&conf->pending_bio_list, mbio);
-		conf->pending_count++;
+		if (plug) {
+			bio_list_add(&plug->pending, mbio);
+			plug->pending_cnt++;
+		} else {
+			bio_list_add(&conf->pending_bio_list, mbio);
+			conf->pending_count++;
+		}
 		spin_unlock_irqrestore(&conf->device_lock, flags);
-		if (!mddev_check_plugged(mddev))
+		if (!plug)
 			md_wakeup_thread(mddev->thread);
 	}
 }

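This hunk replaces the indirect mddev_check_plugged() helper (removed from md.h above) with an explicit blk_check_plugged() call, so writes are batched on a per-plug list and the md thread is only woken when no plug is active. A toy sketch of that plug-batching shape; all names are invented, and current->plug is reduced to a global for the sketch:

#include <stddef.h>
#include <stdio.h>

struct plug { int pending[16]; int pending_cnt; };

static struct plug *current_plug;	/* models current->plug */

static void wake_worker(void) { printf("wake worker thread\n"); }

static void queue_io(int io)
{
	struct plug *plug = current_plug;	/* blk_check_plugged() analogue */

	if (plug)
		plug->pending[plug->pending_cnt++] = io;	/* batch */
	else
		wake_worker();			/* no plug: wake immediately */
}

static void unplug(struct plug *plug)
{
	int i;

	for (i = 0; i < plug->pending_cnt; i++)
		printf("flush io %d\n", plug->pending[i]);
	plug->pending_cnt = 0;
}

int main(void)
{
	struct plug plug = { .pending_cnt = 0 };

	current_plug = &plug;	/* blk_start_plug() analogue */
	queue_io(1);
	queue_io(2);
	current_plug = NULL;	/* blk_finish_plug() analogue */
	unplug(&plug);		/* one flush for the whole batch */

	queue_io(3);		/* no plug active: immediate wakeup */
	return 0;
}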
@@ -1571,7 +1585,25 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
 			split = bio;
 		}
 
+		/*
+		 * If a bio is split, the first part of bio will pass
+		 * barrier but the bio is queued in current->bio_list (see
+		 * generic_make_request). If there is a raise_barrier() called
+		 * here, the second part of bio can't pass barrier. But since
+		 * the first part bio isn't dispatched to underlying disks
+		 * yet, the barrier is never released, hence raise_barrier will
+		 * always wait. We have a deadlock.
+		 * Note, this only happens in read path. For write path, the
+		 * first part of bio is dispatched in a schedule() call
+		 * (because of blk plug) or offloaded to raid10d.
+		 * Quitting from the function immediately can change the bio
+		 * order queued in bio_list and avoid the deadlock.
+		 */
 		__make_request(mddev, split);
+		if (split != bio && bio_data_dir(bio) == READ) {
+			generic_make_request(bio);
+			break;
+		}
 	} while (split != bio);
 
 	/* In case raid10d snuck in to freeze_array */

@@ -3943,10 +3975,6 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
 		return ret;
 	}
 	md_set_array_sectors(mddev, size);
-	if (mddev->queue) {
-		set_capacity(mddev->gendisk, mddev->array_sectors);
-		revalidate_disk(mddev->gendisk);
-	}
 	if (sectors > mddev->dev_sectors &&
 	    mddev->recovery_cp > oldsize) {
 		mddev->recovery_cp = oldsize;

@@ -1401,7 +1401,8 @@ static int set_syndrome_sources(struct page **srcs,
 		    (test_bit(R5_Wantdrain, &dev->flags) ||
 		     test_bit(R5_InJournal, &dev->flags))) ||
 		    (srctype == SYNDROME_SRC_WRITTEN &&
-		     dev->written)) {
+		     (dev->written ||
+		      test_bit(R5_InJournal, &dev->flags)))) {
 			if (test_bit(R5_InJournal, &dev->flags))
 				srcs[slot] = sh->dev[i].orig_page;
 			else

@@ -7605,8 +7606,6 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
 		return ret;
 	}
 	md_set_array_sectors(mddev, newsize);
-	set_capacity(mddev->gendisk, mddev->array_sectors);
-	revalidate_disk(mddev->gendisk);
 	if (sectors > mddev->dev_sectors &&
 	    mddev->recovery_cp > mddev->dev_sectors) {
 		mddev->recovery_cp = mddev->dev_sectors;

@@ -984,29 +984,29 @@
 #define XP_ECC_CNT1_DESC_DED_WIDTH	8
 #define XP_ECC_CNT1_DESC_SEC_INDEX	0
 #define XP_ECC_CNT1_DESC_SEC_WIDTH	8
-#define XP_ECC_IER_DESC_DED_INDEX	0
+#define XP_ECC_IER_DESC_DED_INDEX	5
 #define XP_ECC_IER_DESC_DED_WIDTH	1
-#define XP_ECC_IER_DESC_SEC_INDEX	1
+#define XP_ECC_IER_DESC_SEC_INDEX	4
 #define XP_ECC_IER_DESC_SEC_WIDTH	1
-#define XP_ECC_IER_RX_DED_INDEX		2
+#define XP_ECC_IER_RX_DED_INDEX		3
 #define XP_ECC_IER_RX_DED_WIDTH		1
-#define XP_ECC_IER_RX_SEC_INDEX		3
+#define XP_ECC_IER_RX_SEC_INDEX		2
 #define XP_ECC_IER_RX_SEC_WIDTH		1
-#define XP_ECC_IER_TX_DED_INDEX		4
+#define XP_ECC_IER_TX_DED_INDEX		1
 #define XP_ECC_IER_TX_DED_WIDTH		1
-#define XP_ECC_IER_TX_SEC_INDEX		5
+#define XP_ECC_IER_TX_SEC_INDEX		0
 #define XP_ECC_IER_TX_SEC_WIDTH		1
-#define XP_ECC_ISR_DESC_DED_INDEX	0
+#define XP_ECC_ISR_DESC_DED_INDEX	5
 #define XP_ECC_ISR_DESC_DED_WIDTH	1
-#define XP_ECC_ISR_DESC_SEC_INDEX	1
+#define XP_ECC_ISR_DESC_SEC_INDEX	4
 #define XP_ECC_ISR_DESC_SEC_WIDTH	1
-#define XP_ECC_ISR_RX_DED_INDEX		2
+#define XP_ECC_ISR_RX_DED_INDEX		3
 #define XP_ECC_ISR_RX_DED_WIDTH		1
-#define XP_ECC_ISR_RX_SEC_INDEX		3
+#define XP_ECC_ISR_RX_SEC_INDEX		2
 #define XP_ECC_ISR_RX_SEC_WIDTH		1
-#define XP_ECC_ISR_TX_DED_INDEX		4
+#define XP_ECC_ISR_TX_DED_INDEX		1
 #define XP_ECC_ISR_TX_DED_WIDTH		1
-#define XP_ECC_ISR_TX_SEC_INDEX		5
+#define XP_ECC_ISR_TX_SEC_INDEX		0
 #define XP_ECC_ISR_TX_SEC_WIDTH		1
 #define XP_I2C_MUTEX_BUSY_INDEX		31
 #define XP_I2C_MUTEX_BUSY_WIDTH		1

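These _INDEX/_WIDTH pairs are consumed by the driver's register accessor macros, so this fix is purely a re-mapping of which bit each ECC interrupt-enable/status flag lives in. A self-contained sketch of how such macros typically use the pairs; GET_BITS/SET_BITS below are written from scratch for illustration, they are not the driver's helpers:

#include <stdio.h>

#define GET_BITS(val, index, width) \
	(((val) >> (index)) & ((0x1U << (width)) - 1))

#define SET_BITS(var, index, width, val)				\
	do {								\
		unsigned int mask = ((0x1U << (width)) - 1) << (index);	\
		(var) = ((var) & ~mask) | (((val) << (index)) & mask);	\
	} while (0)

#define XP_ECC_IER_TX_SEC_INDEX		0	/* corrected positions */
#define XP_ECC_IER_TX_SEC_WIDTH		1
#define XP_ECC_IER_DESC_DED_INDEX	5
#define XP_ECC_IER_DESC_DED_WIDTH	1

int main(void)
{
	unsigned int ier = 0;

	SET_BITS(ier, XP_ECC_IER_TX_SEC_INDEX, XP_ECC_IER_TX_SEC_WIDTH, 1);
	SET_BITS(ier, XP_ECC_IER_DESC_DED_INDEX, XP_ECC_IER_DESC_DED_WIDTH, 1);
	printf("ier=0x%02x tx_sec=%u desc_ded=%u\n", ier,
	       GET_BITS(ier, XP_ECC_IER_TX_SEC_INDEX, XP_ECC_IER_TX_SEC_WIDTH),
	       GET_BITS(ier, XP_ECC_IER_DESC_DED_INDEX, XP_ECC_IER_DESC_DED_WIDTH));
	return 0;
}

With the old (wrong) indices the driver would have enabled and acknowledged the wrong interrupt sources, which is why only the INDEX values change while every WIDTH stays 1.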
@@ -1148,8 +1148,8 @@
 #define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH	1
 #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX	1
 #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH	1
-#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX	2
-#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH	1
+#define RX_PACKET_ATTRIBUTES_LAST_INDEX		2
+#define RX_PACKET_ATTRIBUTES_LAST_WIDTH		1
 #define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX	3
 #define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH	1
 #define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX	4

@@ -1158,6 +1158,8 @@
 #define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH	1
 #define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX	6
 #define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH	1
+#define RX_PACKET_ATTRIBUTES_FIRST_INDEX	7
+#define RX_PACKET_ATTRIBUTES_FIRST_WIDTH	1
 
 #define RX_NORMAL_DESC0_OVT_INDEX		0
 #define RX_NORMAL_DESC0_OVT_WIDTH		16

@@ -1896,10 +1896,15 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
 
 	/* Get the header length */
 	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
+		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+			       FIRST, 1);
 		rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
 						      RX_NORMAL_DESC2, HL);
 		if (rdata->rx.hdr_len)
 			pdata->ext_stats.rx_split_header_packets++;
+	} else {
+		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+			       FIRST, 0);
 	}
 
 	/* Get the RSS hash */

@@ -1922,19 +1927,16 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
 		}
 	}
 
-	/* Get the packet length */
-	rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
-
-	if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
-		/* Not all the data has been transferred for this packet */
-		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
-			       INCOMPLETE, 1);
+	/* Not all the data has been transferred for this packet */
+	if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD))
 		return 0;
-	}
 
 	/* This is the last of the data for this packet */
 	XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
-		       INCOMPLETE, 0);
+		       LAST, 1);
+
+	/* Get the packet length */
+	rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
 
 	/* Set checksum done indicator as appropriate */
 	if (netdev->features & NETIF_F_RXCSUM)

@@ -1972,13 +1972,12 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
 {
 	struct sk_buff *skb;
 	u8 *packet;
-	unsigned int copy_len;
 
 	skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
 	if (!skb)
 		return NULL;
 
-	/* Start with the header buffer which may contain just the header
+	/* Pull in the header buffer which may contain just the header
 	 * or the header plus data
 	 */
 	dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,

@@ -1987,30 +1986,49 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
 
 	packet = page_address(rdata->rx.hdr.pa.pages) +
 		 rdata->rx.hdr.pa.pages_offset;
-	copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : len;
-	copy_len = min(rdata->rx.hdr.dma_len, copy_len);
-	skb_copy_to_linear_data(skb, packet, copy_len);
-	skb_put(skb, copy_len);
-
-	len -= copy_len;
-	if (len) {
-		/* Add the remaining data as a frag */
-		dma_sync_single_range_for_cpu(pdata->dev,
-					      rdata->rx.buf.dma_base,
-					      rdata->rx.buf.dma_off,
-					      rdata->rx.buf.dma_len,
-					      DMA_FROM_DEVICE);
-
-		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-				rdata->rx.buf.pa.pages,
-				rdata->rx.buf.pa.pages_offset,
-				len, rdata->rx.buf.dma_len);
-		rdata->rx.buf.pa.pages = NULL;
-	}
+	skb_copy_to_linear_data(skb, packet, len);
+	skb_put(skb, len);
 
 	return skb;
 }
 
+static unsigned int xgbe_rx_buf1_len(struct xgbe_ring_data *rdata,
+				     struct xgbe_packet_data *packet)
+{
+	/* Always zero if not the first descriptor */
+	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST))
+		return 0;
+
+	/* First descriptor with split header, return header length */
+	if (rdata->rx.hdr_len)
+		return rdata->rx.hdr_len;
+
+	/* First descriptor but not the last descriptor and no split header,
+	 * so the full buffer was used
+	 */
+	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
+		return rdata->rx.hdr.dma_len;
+
+	/* First descriptor and last descriptor and no split header, so
+	 * calculate how much of the buffer was used
+	 */
+	return min_t(unsigned int, rdata->rx.hdr.dma_len, rdata->rx.len);
+}
+
+static unsigned int xgbe_rx_buf2_len(struct xgbe_ring_data *rdata,
+				     struct xgbe_packet_data *packet,
+				     unsigned int len)
+{
+	/* Always the full buffer if not the last descriptor */
+	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
+		return rdata->rx.buf.dma_len;
+
+	/* Last descriptor so calculate how much of the buffer was used
+	 * for the last bit of data
+	 */
+	return rdata->rx.len - len;
+}
+
 static int xgbe_tx_poll(struct xgbe_channel *channel)
 {
 	struct xgbe_prv_data *pdata = channel->pdata;

@@ -2093,8 +2111,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 	struct napi_struct *napi;
 	struct sk_buff *skb;
 	struct skb_shared_hwtstamps *hwtstamps;
-	unsigned int incomplete, error, context_next, context;
-	unsigned int len, rdesc_len, max_len;
+	unsigned int last, error, context_next, context;
+	unsigned int len, buf1_len, buf2_len, max_len;
 	unsigned int received = 0;
 	int packet_count = 0;

|
|||
if (!ring)
|
||||
return 0;
|
||||
|
||||
incomplete = 0;
|
||||
last = 0;
|
||||
context_next = 0;
|
||||
|
||||
napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
|
||||
|
@@ -2138,9 +2156,8 @@ read_again:
 		received++;
 		ring->cur++;
 
-		incomplete = XGMAC_GET_BITS(packet->attributes,
-					    RX_PACKET_ATTRIBUTES,
-					    INCOMPLETE);
+		last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+				      LAST);
 		context_next = XGMAC_GET_BITS(packet->attributes,
 					      RX_PACKET_ATTRIBUTES,
 					      CONTEXT_NEXT);

@@ -2149,7 +2166,7 @@ read_again:
 					      CONTEXT);
 
 		/* Earlier error, just drain the remaining data */
-		if ((incomplete || context_next) && error)
+		if ((!last || context_next) && error)
 			goto read_again;
 
 		if (error || packet->errors) {

@@ -2161,16 +2178,22 @@ read_again:
 		}
 
 		if (!context) {
-			/* Length is cumulative, get this descriptor's length */
-			rdesc_len = rdata->rx.len - len;
-			len += rdesc_len;
+			/* Get the data length in the descriptor buffers */
+			buf1_len = xgbe_rx_buf1_len(rdata, packet);
+			len += buf1_len;
+			buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
+			len += buf2_len;
 
-			if (rdesc_len && !skb) {
+			if (!skb) {
 				skb = xgbe_create_skb(pdata, napi, rdata,
-						      rdesc_len);
-				if (!skb)
+						      buf1_len);
+				if (!skb) {
 					error = 1;
-			} else if (rdesc_len) {
+					goto skip_data;
+				}
+			}
+
+			if (buf2_len) {
 				dma_sync_single_range_for_cpu(pdata->dev,
 							      rdata->rx.buf.dma_base,
 							      rdata->rx.buf.dma_off,

@@ -2180,13 +2203,14 @@ read_again:
 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 					rdata->rx.buf.pa.pages,
 					rdata->rx.buf.pa.pages_offset,
-					rdesc_len,
+					buf2_len,
 					rdata->rx.buf.dma_len);
 			rdata->rx.buf.pa.pages = NULL;
 		}
 
-		if (incomplete || context_next)
+skip_data:
+		if (!last || context_next)
 			goto read_again;
 
 		if (!skb)

@@ -2244,7 +2268,7 @@ next_packet:
 	}
 
 	/* Check if we need to save state before leaving */
-	if (received && (incomplete || context_next)) {
+	if (received && (!last || context_next)) {
 		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
 		rdata->state_saved = 1;
 		rdata->state.skb = skb;

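Taken together, the xgbe hunks replace the cumulative INCOMPLETE flag with explicit FIRST/LAST descriptor markers plus per-buffer lengths (xgbe_rx_buf1_len()/xgbe_rx_buf2_len()), which turns the reassembly loop in xgbe_rx_poll() into a simple state machine: FIRST starts a new skb, intermediate descriptors append their buffers, LAST completes the packet. A toy model of that loop; the descriptor contents below are made up:

#include <stdio.h>

struct desc { int first, last, buf_len; };

int main(void)
{
	/* a 3-descriptor packet followed by a single-descriptor packet */
	struct desc ring[] = {
		{ 1, 0, 256 }, { 0, 0, 256 }, { 0, 1, 80 },
		{ 1, 1, 64 },
	};
	int i, len = 0;

	for (i = 0; i < (int)(sizeof(ring) / sizeof(ring[0])); i++) {
		struct desc *d = &ring[i];

		if (d->first)
			len = 0;	/* start a new skb */
		len += d->buf_len;	/* append this descriptor's data */
		if (!d->last)
			continue;	/* read_again: more descriptors */
		printf("completed packet, len=%d\n", len);
	}
	return 0;
}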
Some files were not shown because too many files have changed in this diff.