Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
commit 463910e2df
@@ -27,5 +27,11 @@ You have to add the following kernel parameters in your elilo.conf:
 Macbook Pro 17", iMac 20" :
 	video=efifb:i20
 
+Accepted options:
+
+nowc	Don't map the framebuffer write combined. This can be used
+	to workaround side-effects and slowdowns on other CPU cores
+	when large amounts of console data are written.
+
 --
 Edgar Hucek <gimli@dark-green.com>
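As a usage illustration of the hunk above (a sketch only — it assumes the same video= syntax as the existing examples in this file, and the append= wrapping is the usual elilo.conf convention, not something this patch specifies):

	append="video=efifb:nowc"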
@@ -14016,6 +14016,7 @@ F:	drivers/block/virtio_blk.c
 F:	include/linux/virtio*.h
 F:	include/uapi/linux/virtio_*.h
 F:	drivers/crypto/virtio/
+F:	mm/balloon_compaction.c
 
 VIRTIO CRYPTO DRIVER
 M:	Gonglei <arei.gonglei@huawei.com>
Makefile | 2

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 13
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
@@ -148,7 +148,8 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+			unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 	tlb->fullmm = !(start | (end+1));
@@ -166,8 +167,14 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
 }
 
 static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+		unsigned long start, unsigned long end, bool force)
 {
+	if (force) {
+		tlb->range_start = start;
+		tlb->range_end = end;
+	}
+
 	tlb_flush_mmu(tlb);
 
 	/* keep the page table cache within bounds */
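The hunk above (and the matching ia64, s390, sh and um changes below) renames the per-arch entry points so a generic wrapper can own the flush-pending accounting and pass down a "force" flag. A minimal sketch of what such a generic wrapper would look like — the inc/dec helper names here are assumptions, not confirmed by this diff:

/* Sketch only: generic wrappers delegating to the renamed arch hooks. */
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
		    unsigned long start, unsigned long end)
{
	arch_tlb_gather_mmu(tlb, mm, start, end);
	inc_tlb_flush_pending(mm);			/* assumed helper */
}

void tlb_finish_mmu(struct mmu_gather *tlb,
		    unsigned long start, unsigned long end)
{
	/* Force a flush when another thread is concurrently unmapping. */
	bool force = mm_tlb_flush_nested(tlb->mm);	/* assumed helper */

	arch_tlb_finish_mmu(tlb, start, end, force);
	dec_tlb_flush_pending(tlb->mm);			/* assumed helper */
}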
@@ -168,7 +168,8 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+			unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 	tlb->max = ARRAY_SIZE(tlb->local);
@@ -185,8 +186,11 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
  * collected.
  */
 static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+		unsigned long start, unsigned long end, bool force)
 {
+	if (force)
+		tlb->need_flush = 1;
 	/*
 	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
 	 * tlb->end_addr.
|
@ -2260,7 +2260,7 @@ config CPU_R4K_CACHE_TLB
|
||||||
|
|
||||||
config MIPS_MT_SMP
|
config MIPS_MT_SMP
|
||||||
bool "MIPS MT SMP support (1 TC on each available VPE)"
|
bool "MIPS MT SMP support (1 TC on each available VPE)"
|
||||||
depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MIPSR6
|
depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MIPSR6 && !CPU_MICROMIPS
|
||||||
select CPU_MIPSR2_IRQ_VI
|
select CPU_MIPSR2_IRQ_VI
|
||||||
select CPU_MIPSR2_IRQ_EI
|
select CPU_MIPSR2_IRQ_EI
|
||||||
select SYNC_R4K
|
select SYNC_R4K
|
||||||
|
|
|
@ -243,8 +243,21 @@ include arch/mips/Kbuild.platforms
|
||||||
ifdef CONFIG_PHYSICAL_START
|
ifdef CONFIG_PHYSICAL_START
|
||||||
load-y = $(CONFIG_PHYSICAL_START)
|
load-y = $(CONFIG_PHYSICAL_START)
|
||||||
endif
|
endif
|
||||||
entry-y = 0x$(shell $(NM) vmlinux 2>/dev/null \
|
|
||||||
|
entry-noisa-y = 0x$(shell $(NM) vmlinux 2>/dev/null \
|
||||||
| grep "\bkernel_entry\b" | cut -f1 -d \ )
|
| grep "\bkernel_entry\b" | cut -f1 -d \ )
|
||||||
|
ifdef CONFIG_CPU_MICROMIPS
|
||||||
|
#
|
||||||
|
# Set the ISA bit, since the kernel_entry symbol in the ELF will have it
|
||||||
|
# clear which would lead to images containing addresses which bootloaders may
|
||||||
|
# jump to as MIPS32 code.
|
||||||
|
#
|
||||||
|
entry-y = $(patsubst %0,%1,$(patsubst %2,%3,$(patsubst %4,%5, \
|
||||||
|
$(patsubst %6,%7,$(patsubst %8,%9,$(patsubst %a,%b, \
|
||||||
|
$(patsubst %c,%d,$(patsubst %e,%f,$(entry-noisa-y)))))))))
|
||||||
|
else
|
||||||
|
entry-y = $(entry-noisa-y)
|
||||||
|
endif
|
||||||
|
|
||||||
cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic
|
cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic
|
||||||
drivers-$(CONFIG_PCI) += arch/mips/pci/
|
drivers-$(CONFIG_PCI) += arch/mips/pci/
|
||||||
|
|
|
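The nested patsubst calls above rewrite only the final hex digit of the entry address, mapping each even digit to the next odd one (0→1, 2→3, 4→5, 6→7, 8→9, a→b, c→d, e→f) — in other words, they set bit 0 of the address. For example, a kernel_entry of 0x80100400 (hypothetical value) becomes 0x80100401, which bootloaders interpret as a microMIPS entry point.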
@@ -0,0 +1,2 @@
+ashldi3.c
+bswapsi.c
|
@ -13,9 +13,9 @@
|
||||||
#include <linux/mutex.h>
|
#include <linux/mutex.h>
|
||||||
#include <linux/delay.h>
|
#include <linux/delay.h>
|
||||||
#include <linux/of_platform.h>
|
#include <linux/of_platform.h>
|
||||||
|
#include <linux/io.h>
|
||||||
|
|
||||||
#include <asm/octeon/octeon.h>
|
#include <asm/octeon/octeon.h>
|
||||||
#include <asm/octeon/cvmx-gpio-defs.h>
|
|
||||||
|
|
||||||
/* USB Control Register */
|
/* USB Control Register */
|
||||||
union cvm_usbdrd_uctl_ctl {
|
union cvm_usbdrd_uctl_ctl {
|
||||||
|
|
|
@@ -147,23 +147,12 @@
 	 * Find irq with highest priority
 	 */
 	# open coded PTR_LA t1, cpu_mask_nr_tbl
-#if (_MIPS_SZPTR == 32)
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
 	# open coded la t1, cpu_mask_nr_tbl
 	lui	t1, %hi(cpu_mask_nr_tbl)
 	addiu	t1, %lo(cpu_mask_nr_tbl)
-#endif
-#if (_MIPS_SZPTR == 64)
-	# open coded dla t1, cpu_mask_nr_tbl
-	.set	push
-	.set	noat
-	lui	t1, %highest(cpu_mask_nr_tbl)
-	lui	AT, %hi(cpu_mask_nr_tbl)
-	daddiu	t1, t1, %higher(cpu_mask_nr_tbl)
-	daddiu	AT, AT, %lo(cpu_mask_nr_tbl)
-	dsll	t1, 32
-	daddu	t1, t1, AT
-	.set	pop
+#else
+#error GCC `-msym32' option required for 64-bit DECstation builds
 #endif
 1:	lw	t2,(t1)
 	nop
@@ -214,23 +203,12 @@
 	 * Find irq with highest priority
 	 */
 	# open coded PTR_LA t1,asic_mask_nr_tbl
-#if (_MIPS_SZPTR == 32)
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
 	# open coded la t1, asic_mask_nr_tbl
 	lui	t1, %hi(asic_mask_nr_tbl)
 	addiu	t1, %lo(asic_mask_nr_tbl)
-#endif
-#if (_MIPS_SZPTR == 64)
-	# open coded dla t1, asic_mask_nr_tbl
-	.set	push
-	.set	noat
-	lui	t1, %highest(asic_mask_nr_tbl)
-	lui	AT, %hi(asic_mask_nr_tbl)
-	daddiu	t1, t1, %higher(asic_mask_nr_tbl)
-	daddiu	AT, AT, %lo(asic_mask_nr_tbl)
-	dsll	t1, 32
-	daddu	t1, t1, AT
-	.set	pop
+#else
+#error GCC `-msym32' option required for 64-bit DECstation builds
 #endif
 2:	lw	t2,(t1)
 	nop
|
@ -9,6 +9,8 @@
|
||||||
#ifndef _ASM_CACHE_H
|
#ifndef _ASM_CACHE_H
|
||||||
#define _ASM_CACHE_H
|
#define _ASM_CACHE_H
|
||||||
|
|
||||||
|
#include <kmalloc.h>
|
||||||
|
|
||||||
#define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
|
#define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
|
||||||
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
|
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
|
||||||
|
|
||||||
|
|
|
@ -428,6 +428,9 @@
|
||||||
#ifndef cpu_scache_line_size
|
#ifndef cpu_scache_line_size
|
||||||
#define cpu_scache_line_size() cpu_data[0].scache.linesz
|
#define cpu_scache_line_size() cpu_data[0].scache.linesz
|
||||||
#endif
|
#endif
|
||||||
|
#ifndef cpu_tcache_line_size
|
||||||
|
#define cpu_tcache_line_size() cpu_data[0].tcache.linesz
|
||||||
|
#endif
|
||||||
|
|
||||||
#ifndef cpu_hwrena_impl_bits
|
#ifndef cpu_hwrena_impl_bits
|
||||||
#define cpu_hwrena_impl_bits 0
|
#define cpu_hwrena_impl_bits 0
|
||||||
|
|
|
@ -33,6 +33,10 @@
|
||||||
#define CVMX_L2C_DBG (CVMX_ADD_IO_SEG(0x0001180080000030ull))
|
#define CVMX_L2C_DBG (CVMX_ADD_IO_SEG(0x0001180080000030ull))
|
||||||
#define CVMX_L2C_CFG (CVMX_ADD_IO_SEG(0x0001180080000000ull))
|
#define CVMX_L2C_CFG (CVMX_ADD_IO_SEG(0x0001180080000000ull))
|
||||||
#define CVMX_L2C_CTL (CVMX_ADD_IO_SEG(0x0001180080800000ull))
|
#define CVMX_L2C_CTL (CVMX_ADD_IO_SEG(0x0001180080800000ull))
|
||||||
|
#define CVMX_L2C_ERR_TDTX(block_id) \
|
||||||
|
(CVMX_ADD_IO_SEG(0x0001180080A007E0ull) + ((block_id) & 3) * 0x40000ull)
|
||||||
|
#define CVMX_L2C_ERR_TTGX(block_id) \
|
||||||
|
(CVMX_ADD_IO_SEG(0x0001180080A007E8ull) + ((block_id) & 3) * 0x40000ull)
|
||||||
#define CVMX_L2C_LCKBASE (CVMX_ADD_IO_SEG(0x0001180080000058ull))
|
#define CVMX_L2C_LCKBASE (CVMX_ADD_IO_SEG(0x0001180080000058ull))
|
||||||
#define CVMX_L2C_LCKOFF (CVMX_ADD_IO_SEG(0x0001180080000060ull))
|
#define CVMX_L2C_LCKOFF (CVMX_ADD_IO_SEG(0x0001180080000060ull))
|
||||||
#define CVMX_L2C_PFCTL (CVMX_ADD_IO_SEG(0x0001180080000090ull))
|
#define CVMX_L2C_PFCTL (CVMX_ADD_IO_SEG(0x0001180080000090ull))
|
||||||
|
@ -66,9 +70,40 @@
|
||||||
((offset) & 1) * 8)
|
((offset) & 1) * 8)
|
||||||
#define CVMX_L2C_WPAR_PPX(offset) (CVMX_ADD_IO_SEG(0x0001180080840000ull) + \
|
#define CVMX_L2C_WPAR_PPX(offset) (CVMX_ADD_IO_SEG(0x0001180080840000ull) + \
|
||||||
((offset) & 31) * 8)
|
((offset) & 31) * 8)
|
||||||
#define CVMX_L2D_FUS3 (CVMX_ADD_IO_SEG(0x00011800800007B8ull))
|
|
||||||
|
|
||||||
|
|
||||||
|
union cvmx_l2c_err_tdtx {
|
||||||
|
uint64_t u64;
|
||||||
|
struct cvmx_l2c_err_tdtx_s {
|
||||||
|
__BITFIELD_FIELD(uint64_t dbe:1,
|
||||||
|
__BITFIELD_FIELD(uint64_t sbe:1,
|
||||||
|
__BITFIELD_FIELD(uint64_t vdbe:1,
|
||||||
|
__BITFIELD_FIELD(uint64_t vsbe:1,
|
||||||
|
__BITFIELD_FIELD(uint64_t syn:10,
|
||||||
|
__BITFIELD_FIELD(uint64_t reserved_22_49:28,
|
||||||
|
__BITFIELD_FIELD(uint64_t wayidx:18,
|
||||||
|
__BITFIELD_FIELD(uint64_t reserved_2_3:2,
|
||||||
|
__BITFIELD_FIELD(uint64_t type:2,
|
||||||
|
;)))))))))
|
||||||
|
} s;
|
||||||
|
};
|
||||||
|
|
||||||
|
union cvmx_l2c_err_ttgx {
|
||||||
|
uint64_t u64;
|
||||||
|
struct cvmx_l2c_err_ttgx_s {
|
||||||
|
__BITFIELD_FIELD(uint64_t dbe:1,
|
||||||
|
__BITFIELD_FIELD(uint64_t sbe:1,
|
||||||
|
__BITFIELD_FIELD(uint64_t noway:1,
|
||||||
|
__BITFIELD_FIELD(uint64_t reserved_56_60:5,
|
||||||
|
__BITFIELD_FIELD(uint64_t syn:6,
|
||||||
|
__BITFIELD_FIELD(uint64_t reserved_22_49:28,
|
||||||
|
__BITFIELD_FIELD(uint64_t wayidx:15,
|
||||||
|
__BITFIELD_FIELD(uint64_t reserved_2_6:5,
|
||||||
|
__BITFIELD_FIELD(uint64_t type:2,
|
||||||
|
;)))))))))
|
||||||
|
} s;
|
||||||
|
};
|
||||||
|
|
||||||
union cvmx_l2c_cfg {
|
union cvmx_l2c_cfg {
|
||||||
uint64_t u64;
|
uint64_t u64;
|
||||||
struct cvmx_l2c_cfg_s {
|
struct cvmx_l2c_cfg_s {
|
||||||
|
|
|
@ -0,0 +1,60 @@
|
||||||
|
/***********************license start***************
|
||||||
|
* Author: Cavium Networks
|
||||||
|
*
|
||||||
|
* Contact: support@caviumnetworks.com
|
||||||
|
* This file is part of the OCTEON SDK
|
||||||
|
*
|
||||||
|
* Copyright (c) 2003-2017 Cavium, Inc.
|
||||||
|
*
|
||||||
|
* This file is free software; you can redistribute it and/or modify
|
||||||
|
* it under the terms of the GNU General Public License, Version 2, as
|
||||||
|
* published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This file is distributed in the hope that it will be useful, but
|
||||||
|
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
|
||||||
|
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
|
||||||
|
* NONINFRINGEMENT. See the GNU General Public License for more
|
||||||
|
* details.
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU General Public License
|
||||||
|
* along with this file; if not, write to the Free Software
|
||||||
|
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
||||||
|
* or visit http://www.gnu.org/licenses/.
|
||||||
|
*
|
||||||
|
* This file may also be available under a different license from Cavium.
|
||||||
|
* Contact Cavium Networks for more information
|
||||||
|
***********************license end**************************************/
|
||||||
|
|
||||||
|
#ifndef __CVMX_L2D_DEFS_H__
|
||||||
|
#define __CVMX_L2D_DEFS_H__
|
||||||
|
|
||||||
|
#define CVMX_L2D_ERR (CVMX_ADD_IO_SEG(0x0001180080000010ull))
|
||||||
|
#define CVMX_L2D_FUS3 (CVMX_ADD_IO_SEG(0x00011800800007B8ull))
|
||||||
|
|
||||||
|
|
||||||
|
union cvmx_l2d_err {
|
||||||
|
uint64_t u64;
|
||||||
|
struct cvmx_l2d_err_s {
|
||||||
|
__BITFIELD_FIELD(uint64_t reserved_6_63:58,
|
||||||
|
__BITFIELD_FIELD(uint64_t bmhclsel:1,
|
||||||
|
__BITFIELD_FIELD(uint64_t ded_err:1,
|
||||||
|
__BITFIELD_FIELD(uint64_t sec_err:1,
|
||||||
|
__BITFIELD_FIELD(uint64_t ded_intena:1,
|
||||||
|
__BITFIELD_FIELD(uint64_t sec_intena:1,
|
||||||
|
__BITFIELD_FIELD(uint64_t ecc_ena:1,
|
||||||
|
;)))))))
|
||||||
|
} s;
|
||||||
|
};
|
||||||
|
|
||||||
|
union cvmx_l2d_fus3 {
|
||||||
|
uint64_t u64;
|
||||||
|
struct cvmx_l2d_fus3_s {
|
||||||
|
__BITFIELD_FIELD(uint64_t reserved_40_63:24,
|
||||||
|
__BITFIELD_FIELD(uint64_t ema_ctl:3,
|
||||||
|
__BITFIELD_FIELD(uint64_t reserved_34_36:3,
|
||||||
|
__BITFIELD_FIELD(uint64_t q3fus:34,
|
||||||
|
;))))
|
||||||
|
} s;
|
||||||
|
};
|
||||||
|
|
||||||
|
#endif
|
|
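The __BITFIELD_FIELD() macro used throughout these unions exists so that one field list can describe both big- and little-endian bitfield layouts. A sketch of the idea — the MIPS definition lives in asm/bitfield.h and the exact form there may differ from this reconstruction:

/* Sketch, assuming the asm/bitfield.h convention: */
#ifdef __MIPSEB__
#define __BITFIELD_FIELD(field, more)	\
	field;				\
	more
#elif defined(__MIPSEL__)
#define __BITFIELD_FIELD(field, more)	\
	more				\
	field;
#endif

On a big-endian build the fields are emitted in the order written (most-significant first); on little-endian the nesting reverses them, so the same declaration matches the hardware register in both cases.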
@@ -62,6 +62,7 @@ enum cvmx_mips_space {
 #include <asm/octeon/cvmx-iob-defs.h>
 #include <asm/octeon/cvmx-ipd-defs.h>
 #include <asm/octeon/cvmx-l2c-defs.h>
+#include <asm/octeon/cvmx-l2d-defs.h>
 #include <asm/octeon/cvmx-l2t-defs.h>
 #include <asm/octeon/cvmx-led-defs.h>
 #include <asm/octeon/cvmx-mio-defs.h>
@@ -376,9 +376,6 @@ asmlinkage void start_secondary(void)
 	cpumask_set_cpu(cpu, &cpu_coherent_mask);
 	notify_cpu_starting(cpu);
 
-	complete(&cpu_running);
-	synchronise_count_slave(cpu);
-
 	set_cpu_online(cpu, true);
 
 	set_cpu_sibling_map(cpu);
@@ -386,6 +383,9 @@ asmlinkage void start_secondary(void)
 
 	calculate_cpu_foreign_map();
 
+	complete(&cpu_running);
+	synchronise_count_slave(cpu);
+
 	/*
 	 * irq will be enabled in ->smp_finish(), enabling it too early
 	 * is dangerous.
@@ -48,7 +48,7 @@
 
 #include "uasm.c"
 
-static const struct insn const insn_table[insn_invalid] = {
+static const struct insn insn_table[insn_invalid] = {
 	[insn_addiu]	= {M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
 	[insn_addu]	= {M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD},
 	[insn_and]	= {M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD},
@@ -28,16 +28,15 @@ EXPORT_SYMBOL(PCIBIOS_MIN_MEM);
 
 static int __init pcibios_set_cache_line_size(void)
 {
-	struct cpuinfo_mips *c = &current_cpu_data;
 	unsigned int lsize;
 
 	/*
 	 * Set PCI cacheline size to that of the highest level in the
 	 * cache hierarchy.
 	 */
-	lsize = c->dcache.linesz;
-	lsize = c->scache.linesz ? : lsize;
-	lsize = c->tcache.linesz ? : lsize;
+	lsize = cpu_dcache_line_size();
+	lsize = cpu_scache_line_size() ? : lsize;
+	lsize = cpu_tcache_line_size() ? : lsize;
 
 	BUG_ON(!lsize);
 
@@ -35,7 +35,8 @@ static __always_inline long gettimeofday_fallback(struct timeval *_tv,
 	"       syscall\n"
 	: "=r" (ret), "=r" (error)
 	: "r" (tv), "r" (tz), "r" (nr)
-	: "memory");
+	: "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
+	  "$14", "$15", "$24", "$25", "hi", "lo", "memory");
 
 	return error ? -ret : ret;
 }
@@ -55,7 +56,8 @@ static __always_inline long clock_gettime_fallback(clockid_t _clkid,
 	"       syscall\n"
 	: "=r" (ret), "=r" (error)
 	: "r" (clkid), "r" (ts), "r" (nr)
-	: "memory");
+	: "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
+	  "$14", "$15", "$24", "$25", "hi", "lo", "memory");
 
 	return error ? -ret : ret;
 }
@@ -293,7 +293,8 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_STACK_USAGE=y
 CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_HARDLOCKUP_DETECTOR=y
 CONFIG_LATENCYTOP=y
 CONFIG_SCHED_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
@@ -324,7 +324,8 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_STACK_USAGE=y
 CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_HARDLOCKUP_DETECTOR=y
 CONFIG_DEBUG_MUTEXES=y
 CONFIG_LATENCYTOP=y
 CONFIG_SCHED_TRACER=y
@@ -291,7 +291,8 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_STACK_USAGE=y
 CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_HARDLOCKUP_DETECTOR=y
 CONFIG_LATENCYTOP=y
 CONFIG_SCHED_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
@@ -223,17 +223,27 @@ system_call_exit:
 	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
 	bne-	.Lsyscall_exit_work
 
-	/* If MSR_FP and MSR_VEC are set in user msr, then no need to restore */
-	li	r7,MSR_FP
+	andi.	r0,r8,MSR_FP
+	beq 2f
 #ifdef CONFIG_ALTIVEC
-	oris	r7,r7,MSR_VEC@h
+	andis.	r0,r8,MSR_VEC@h
+	bne	3f
 #endif
-	and	r0,r8,r7
-	cmpd	r0,r7
-	bne	.Lsyscall_restore_math
-.Lsyscall_restore_math_cont:
+2:	addi    r3,r1,STACK_FRAME_OVERHEAD
+#ifdef CONFIG_PPC_BOOK3S
+	li	r10,MSR_RI
+	mtmsrd	r10,1		/* Restore RI */
+#endif
+	bl	restore_math
+#ifdef CONFIG_PPC_BOOK3S
+	li	r11,0
+	mtmsrd	r11,1
+#endif
+	ld	r8,_MSR(r1)
+	ld	r3,RESULT(r1)
+	li	r11,-MAX_ERRNO
 
-	cmpld	r3,r11
+3:	cmpld	r3,r11
 	ld	r5,_CCR(r1)
 	bge-	.Lsyscall_error
 .Lsyscall_error_cont:
@@ -267,40 +277,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 	std	r5,_CCR(r1)
 	b	.Lsyscall_error_cont
 
-.Lsyscall_restore_math:
-	/*
-	 * Some initial tests from restore_math to avoid the heavyweight
-	 * C code entry and MSR manipulations.
-	 */
-	LOAD_REG_IMMEDIATE(r0, MSR_TS_MASK)
-	and.	r0,r0,r8
-	bne	1f
-
-	ld	r7,PACACURRENT(r13)
-	lbz	r0,THREAD+THREAD_LOAD_FP(r7)
-#ifdef CONFIG_ALTIVEC
-	lbz	r6,THREAD+THREAD_LOAD_VEC(r7)
-	add	r0,r0,r6
-#endif
-	cmpdi	r0,0
-	beq	.Lsyscall_restore_math_cont
-
-1:	addi    r3,r1,STACK_FRAME_OVERHEAD
-#ifdef CONFIG_PPC_BOOK3S
-	li	r10,MSR_RI
-	mtmsrd	r10,1		/* Restore RI */
-#endif
-	bl	restore_math
-#ifdef CONFIG_PPC_BOOK3S
-	li	r11,0
-	mtmsrd	r11,1
-#endif
-	/* Restore volatiles, reload MSR from updated one */
-	ld	r8,_MSR(r1)
-	ld	r3,RESULT(r1)
-	li	r11,-MAX_ERRNO
-	b	.Lsyscall_restore_math_cont
-
 	/* Traced system call support */
 .Lsyscall_dotrace:
 	bl	save_nvgprs
@@ -511,10 +511,6 @@ void restore_math(struct pt_regs *regs)
 {
 	unsigned long msr;
 
-	/*
-	 * Syscall exit makes a similar initial check before branching
-	 * to restore_math. Keep them in synch.
-	 */
 	if (!msr_tm_active(regs->msr) &&
 		!current->thread.load_fp && !loadvec(current->thread))
 		return;
@@ -351,7 +351,7 @@ static void nmi_ipi_lock_start(unsigned long *flags)
 	hard_irq_disable();
 	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
 		raw_local_irq_restore(*flags);
-		cpu_relax();
+		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
 		raw_local_irq_save(*flags);
 		hard_irq_disable();
 	}
@@ -360,7 +360,7 @@ static void nmi_ipi_lock_start(unsigned long *flags)
 static void nmi_ipi_lock(void)
 {
 	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
-		cpu_relax();
+		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
 }
 
 static void nmi_ipi_unlock(void)
@@ -475,7 +475,7 @@ int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
 	nmi_ipi_lock_start(&flags);
 	while (nmi_ipi_busy_count) {
 		nmi_ipi_unlock_end(&flags);
-		cpu_relax();
+		spin_until_cond(nmi_ipi_busy_count == 0);
 		nmi_ipi_lock_start(&flags);
 	}
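spin_until_cond() is a powerpc helper that spins at lowered thread priority until the condition becomes true, which is friendlier to SMT siblings than a bare cpu_relax() loop. Roughly — a sketch under the assumption that it wraps the spin_begin()/spin_cpu_relax()/spin_end() primitives from asm/processor.h; the exact definition may differ:

#define spin_until_cond(cond)					\
do {								\
	if (unlikely(!(cond))) {				\
		spin_begin();					\
		do {						\
			spin_cpu_relax();			\
		} while (!(cond));				\
		spin_end();					\
	}							\
} while (0)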
@@ -71,15 +71,20 @@ static inline void wd_smp_lock(unsigned long *flags)
 	 * This may be called from low level interrupt handlers at some
 	 * point in future.
 	 */
-	local_irq_save(*flags);
-	while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock)))
-		cpu_relax();
+	raw_local_irq_save(*flags);
+	hard_irq_disable(); /* Make it soft-NMI safe */
+	while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock))) {
+		raw_local_irq_restore(*flags);
+		spin_until_cond(!test_bit(0, &__wd_smp_lock));
+		raw_local_irq_save(*flags);
+		hard_irq_disable();
+	}
 }
 
 static inline void wd_smp_unlock(unsigned long *flags)
 {
 	clear_bit_unlock(0, &__wd_smp_lock);
-	local_irq_restore(*flags);
+	raw_local_irq_restore(*flags);
 }
 
 static void wd_lockup_ipi(struct pt_regs *regs)
@@ -96,10 +101,10 @@ static void wd_lockup_ipi(struct pt_regs *regs)
 		nmi_panic(regs, "Hard LOCKUP");
 }
 
-static void set_cpu_stuck(int cpu, u64 tb)
+static void set_cpumask_stuck(const struct cpumask *cpumask, u64 tb)
 {
-	cpumask_set_cpu(cpu, &wd_smp_cpus_stuck);
-	cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);
+	cpumask_or(&wd_smp_cpus_stuck, &wd_smp_cpus_stuck, cpumask);
+	cpumask_andnot(&wd_smp_cpus_pending, &wd_smp_cpus_pending, cpumask);
 	if (cpumask_empty(&wd_smp_cpus_pending)) {
 		wd_smp_last_reset_tb = tb;
 		cpumask_andnot(&wd_smp_cpus_pending,
@@ -107,6 +112,10 @@ static void set_cpu_stuck(int cpu, u64 tb)
 			       &wd_smp_cpus_stuck);
 	}
 }
+static void set_cpu_stuck(int cpu, u64 tb)
+{
+	set_cpumask_stuck(cpumask_of(cpu), tb);
+}
 
 static void watchdog_smp_panic(int cpu, u64 tb)
 {
@@ -135,11 +144,9 @@ static void watchdog_smp_panic(int cpu, u64 tb)
 	}
 	smp_flush_nmi_ipi(1000000);
 
-	/* Take the stuck CPU out of the watch group */
-	for_each_cpu(c, &wd_smp_cpus_pending)
-		set_cpu_stuck(c, tb);
+	/* Take the stuck CPUs out of the watch group */
+	set_cpumask_stuck(&wd_smp_cpus_pending, tb);
 
-out:
 	wd_smp_unlock(&flags);
 
 	printk_safe_flush();
@@ -152,6 +159,11 @@ out:
 
 	if (hardlockup_panic)
 		nmi_panic(NULL, "Hard LOCKUP");
+
+	return;
+
+out:
+	wd_smp_unlock(&flags);
 }
 
 static void wd_smp_clear_cpu_pending(int cpu, u64 tb)
@@ -258,9 +270,11 @@ static void wd_timer_fn(unsigned long data)
 
 void arch_touch_nmi_watchdog(void)
 {
+	unsigned long ticks = tb_ticks_per_usec * wd_timer_period_ms * 1000;
 	int cpu = smp_processor_id();
 
-	watchdog_timer_interrupt(cpu);
+	if (get_tb() - per_cpu(wd_timer_tb, cpu) >= ticks)
+		watchdog_timer_interrupt(cpu);
 }
 EXPORT_SYMBOL(arch_touch_nmi_watchdog);
 
@@ -283,6 +297,8 @@ static void stop_watchdog_timer_on(unsigned int cpu)
 
 static int start_wd_on_cpu(unsigned int cpu)
 {
+	unsigned long flags;
+
 	if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) {
 		WARN_ON(1);
 		return 0;
@@ -297,12 +313,14 @@ static int start_wd_on_cpu(unsigned int cpu)
 	if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
 		return 0;
 
+	wd_smp_lock(&flags);
 	cpumask_set_cpu(cpu, &wd_cpus_enabled);
 	if (cpumask_weight(&wd_cpus_enabled) == 1) {
 		cpumask_set_cpu(cpu, &wd_smp_cpus_pending);
 		wd_smp_last_reset_tb = get_tb();
 	}
-	smp_wmb();
+	wd_smp_unlock(&flags);
 
 	start_watchdog_timer_on(cpu);
 
 	return 0;
@@ -310,12 +328,17 @@ static int start_wd_on_cpu(unsigned int cpu)
 
 static int stop_wd_on_cpu(unsigned int cpu)
 {
+	unsigned long flags;
+
 	if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
 		return 0; /* Can happen in CPU unplug case */
 
 	stop_watchdog_timer_on(cpu);
 
+	wd_smp_lock(&flags);
 	cpumask_clear_cpu(cpu, &wd_cpus_enabled);
+	wd_smp_unlock(&flags);
 
 	wd_smp_clear_cpu_pending(cpu, get_tb());
 
 	return 0;
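A quick sanity check on the new rate limit in arch_touch_nmi_watchdog() above: with a hypothetical tb_ticks_per_usec of 512 (a 512 MHz timebase) and a wd_timer_period_ms of 100, ticks = 512 * 100 * 1000 = 51,200,000 timebase ticks, i.e. a touch is forwarded to watchdog_timer_interrupt() at most roughly once per watchdog period per CPU instead of on every call.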
@@ -56,6 +56,7 @@ u64 pnv_first_deep_stop_state = MAX_STOP_STATE;
  */
 static u64 pnv_deepest_stop_psscr_val;
 static u64 pnv_deepest_stop_psscr_mask;
+static u64 pnv_deepest_stop_flag;
 static bool deepest_stop_found;
 
 static int pnv_save_sprs_for_deep_states(void)
@@ -185,8 +186,40 @@ static void pnv_alloc_idle_core_states(void)
 
 	update_subcore_sibling_mask();
 
-	if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT)
-		pnv_save_sprs_for_deep_states();
+	if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) {
+		int rc = pnv_save_sprs_for_deep_states();
+
+		if (likely(!rc))
+			return;
+
+		/*
+		 * The stop-api is unable to restore hypervisor
+		 * resources on wakeup from platform idle states which
+		 * lose full context. So disable such states.
+		 */
+		supported_cpuidle_states &= ~OPAL_PM_LOSE_FULL_CONTEXT;
+		pr_warn("cpuidle-powernv: Disabling idle states that lose full context\n");
+		pr_warn("cpuidle-powernv: Idle power-savings, CPU-Hotplug affected\n");
+
+		if (cpu_has_feature(CPU_FTR_ARCH_300) &&
+		    (pnv_deepest_stop_flag & OPAL_PM_LOSE_FULL_CONTEXT)) {
+			/*
+			 * Use the default stop state for CPU-Hotplug
+			 * if available.
+			 */
+			if (default_stop_found) {
+				pnv_deepest_stop_psscr_val =
+					pnv_default_stop_val;
+				pnv_deepest_stop_psscr_mask =
+					pnv_default_stop_mask;
+				pr_warn("cpuidle-powernv: Offlined CPUs will stop with psscr = 0x%016llx\n",
+						pnv_deepest_stop_psscr_val);
+			} else { /* Fallback to snooze loop for CPU-Hotplug */
+				deepest_stop_found = false;
+				pr_warn("cpuidle-powernv: Offlined CPUs will busy wait\n");
+			}
+		}
+	}
 }
 
 u32 pnv_get_supported_cpuidle_states(void)
@@ -375,7 +408,8 @@ unsigned long pnv_cpu_offline(unsigned int cpu)
 					pnv_deepest_stop_psscr_val;
 		srr1 = power9_idle_stop(psscr);
 
-	} else if (idle_states & OPAL_PM_WINKLE_ENABLED) {
+	} else if ((idle_states & OPAL_PM_WINKLE_ENABLED) &&
+		   (idle_states & OPAL_PM_LOSE_FULL_CONTEXT)) {
 		srr1 = power7_idle_insn(PNV_THREAD_WINKLE);
 	} else if ((idle_states & OPAL_PM_SLEEP_ENABLED) ||
 		   (idle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
@@ -553,6 +587,7 @@ static int __init pnv_power9_idle_init(struct device_node *np, u32 *flags,
 			max_residency_ns = residency_ns[i];
 			pnv_deepest_stop_psscr_val = psscr_val[i];
 			pnv_deepest_stop_psscr_mask = psscr_mask[i];
+			pnv_deepest_stop_flag = flags[i];
 			deepest_stop_found = true;
 		}
 
@@ -47,10 +47,9 @@ struct mmu_table_batch {
 extern void tlb_table_flush(struct mmu_gather *tlb);
 extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
 
-static inline void tlb_gather_mmu(struct mmu_gather *tlb,
-				  struct mm_struct *mm,
-				  unsigned long start,
-				  unsigned long end)
+static inline void
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+		    unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 	tlb->start = start;
@@ -76,9 +75,15 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 	tlb_flush_mmu_free(tlb);
 }
 
-static inline void tlb_finish_mmu(struct mmu_gather *tlb,
-				  unsigned long start, unsigned long end)
+static inline void
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+		unsigned long start, unsigned long end, bool force)
 {
+	if (force) {
+		tlb->start = start;
+		tlb->end = end;
+	}
+
 	tlb_flush_mmu(tlb);
 }
 
@@ -36,7 +36,8 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+		unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 	tlb->start = start;
@@ -47,9 +48,10 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
 }
 
 static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+		unsigned long start, unsigned long end, bool force)
 {
-	if (tlb->fullmm)
+	if (tlb->fullmm || force)
 		flush_tlb_mm(tlb->mm);
 
 	/* keep the page table cache within bounds */
@@ -45,7 +45,8 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+		unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 	tlb->start = start;
@@ -80,13 +81,19 @@ tlb_flush_mmu(struct mmu_gather *tlb)
 	tlb_flush_mmu_free(tlb);
 }
 
-/* tlb_finish_mmu
+/* arch_tlb_finish_mmu
  * Called at the end of the shootdown operation to free up any resources
  * that were required.
  */
 static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+		unsigned long start, unsigned long end, bool force)
 {
+	if (force) {
+		tlb->start = start;
+		tlb->end = end;
+		tlb->need_flush = 1;
+	}
 	tlb_flush_mmu(tlb);
 
 	/* keep the page table cache within bounds */
@@ -117,11 +117,10 @@
 .set T1, REG_T1
 .endm
 
-#define K_BASE		%r8
 #define HASH_PTR	%r9
+#define BLOCKS_CTR	%r8
 #define BUFFER_PTR	%r10
 #define BUFFER_PTR2	%r13
-#define BUFFER_END	%r11
 
 #define PRECALC_BUF	%r14
 #define WK_BUF		%r15
@@ -205,14 +204,14 @@
 		 * blended AVX2 and ALU instruction scheduling
 		 * 1 vector iteration per 8 rounds
 		 */
-		vmovdqu ((i * 2) + PRECALC_OFFSET)(BUFFER_PTR), W_TMP
+		vmovdqu (i * 2)(BUFFER_PTR), W_TMP
 	.elseif ((i & 7) == 1)
-		vinsertf128 $1, (((i-1) * 2)+PRECALC_OFFSET)(BUFFER_PTR2),\
+		vinsertf128 $1, ((i-1) * 2)(BUFFER_PTR2),\
 			 WY_TMP, WY_TMP
 	.elseif ((i & 7) == 2)
 		vpshufb YMM_SHUFB_BSWAP, WY_TMP, WY
 	.elseif ((i & 7) == 4)
-		vpaddd  K_XMM(K_BASE), WY, WY_TMP
+		vpaddd  K_XMM + K_XMM_AR(%rip), WY, WY_TMP
 	.elseif ((i & 7) == 7)
 		vmovdqu  WY_TMP, PRECALC_WK(i&~7)
 
@@ -255,7 +254,7 @@
 		vpxor	WY, WY_TMP, WY_TMP
 	.elseif ((i & 7) == 7)
 		vpxor	WY_TMP2, WY_TMP, WY
-		vpaddd	K_XMM(K_BASE), WY, WY_TMP
+		vpaddd	K_XMM + K_XMM_AR(%rip), WY, WY_TMP
 		vmovdqu	WY_TMP, PRECALC_WK(i&~7)
 
 		PRECALC_ROTATE_WY
@@ -291,7 +290,7 @@
 		vpsrld	$30, WY, WY
 		vpor	WY, WY_TMP, WY
 	.elseif ((i & 7) == 7)
-		vpaddd	K_XMM(K_BASE), WY, WY_TMP
+		vpaddd	K_XMM + K_XMM_AR(%rip), WY, WY_TMP
 		vmovdqu	WY_TMP, PRECALC_WK(i&~7)
 
 		PRECALC_ROTATE_WY
@@ -446,6 +445,16 @@
 
 .endm
 
+/* Add constant only if (%2 > %3) condition met (uses RTA as temp)
+ * %1 + %2 >= %3 ? %4 : 0
+ */
+.macro ADD_IF_GE a, b, c, d
+	mov     \a, RTA
+	add     $\d, RTA
+	cmp     $\c, \b
+	cmovge  RTA, \a
+.endm
+
 /*
  * macro implements 80 rounds of SHA-1, for multiple blocks with s/w pipelining
  */
@@ -463,13 +472,16 @@
 	lea	(2*4*80+32)(%rsp), WK_BUF
 
 	# Precalc WK for first 2 blocks
-	PRECALC_OFFSET = 0
+	ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 2, 64
 	.set i, 0
 	.rept    160
 		PRECALC i
 		.set i, i + 1
 	.endr
-	PRECALC_OFFSET = 128
+
+	/* Go to next block if needed */
+	ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 3, 128
+	ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128
 	xchg	WK_BUF, PRECALC_BUF
 
 	.align 32
@@ -479,8 +491,8 @@ _loop:
 	 * we use K_BASE value as a signal of a last block,
 	 * it is set below by: cmovae BUFFER_PTR, K_BASE
 	 */
-	cmp	K_BASE, BUFFER_PTR
-	jne	_begin
+	test BLOCKS_CTR, BLOCKS_CTR
+	jnz _begin
 	.align 32
 	jmp	_end
 	.align 32
@@ -512,10 +524,10 @@ _loop0:
 		.set j, j+2
 	.endr
 
-	add	$(2*64), BUFFER_PTR	   /* move to next odd-64-byte block */
-	cmp	BUFFER_END, BUFFER_PTR	   /* is current block the last one? */
-	cmovae	K_BASE, BUFFER_PTR	/* signal the last iteration smartly */
+	/* Update Counter */
+	sub $1, BLOCKS_CTR
+	/* Move to the next block only if needed*/
+	ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 4, 128
 	/*
 	 * rounds
 	 * 60,62,64,66,68
@@ -532,8 +544,8 @@ _loop0:
 	UPDATE_HASH	12(HASH_PTR), D
 	UPDATE_HASH	16(HASH_PTR), E
 
-	cmp	K_BASE, BUFFER_PTR	/* is current block the last one? */
-	je	_loop
+	test	BLOCKS_CTR, BLOCKS_CTR
+	jz	_loop
 
 	mov	TB, B
 
@@ -575,10 +587,10 @@ _loop2:
 		.set j, j+2
 	.endr
 
-	add	$(2*64), BUFFER_PTR2	  /* move to next even-64-byte block */
-
-	cmp	BUFFER_END, BUFFER_PTR2	  /* is current block the last one */
-	cmovae	K_BASE, BUFFER_PTR	   /* signal the last iteration smartly */
+	/* update counter */
+	sub     $1, BLOCKS_CTR
+	/* Move to the next block only if needed*/
+	ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128
 
 	jmp	_loop3
 _loop3:
@@ -641,19 +653,12 @@ _loop3:
 
 	avx2_zeroupper
 
-	lea	K_XMM_AR(%rip), K_BASE
+	/* Setup initial values */
 
 	mov	CTX, HASH_PTR
 	mov	BUF, BUFFER_PTR
-	lea	64(BUF), BUFFER_PTR2
-
-	shl	$6, CNT			/* mul by 64 */
-	add	BUF, CNT
-	add	$64, CNT
-	mov	CNT, BUFFER_END
-
-	cmp	BUFFER_END, BUFFER_PTR2
-	cmovae	K_BASE, BUFFER_PTR2
+	mov	BUF, BUFFER_PTR2
+	mov	CNT, BLOCKS_CTR
 
 	xmm_mov	BSWAP_SHUFB_CTL(%rip), YMM_SHUFB_BSWAP
 
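For readers decoding the ADD_IF_GE macro introduced above: despite the terse comment in the patch, it is a conditional add. A C sketch of its effect (my paraphrase, not code from this patch — cmovge implies a signed compare):

/* a += d, but only when b >= c; otherwise a is left unchanged. */
static inline long add_if_ge(long a, long b, long c, long d)
{
	return (b >= c) ? a + d : a;
}

This is what lets the rewritten code advance BUFFER_PTR/BUFFER_PTR2 by a block only while BLOCKS_CTR says more blocks remain, replacing the old BUFFER_END/K_BASE sentinel scheme.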
@@ -201,7 +201,7 @@ asmlinkage void sha1_transform_avx2(u32 *digest, const char *data,
 
 static bool avx2_usable(void)
 {
-	if (false && avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
+	if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
 	    && boot_cpu_has(X86_FEATURE_BMI1)
 	    && boot_cpu_has(X86_FEATURE_BMI2))
 		return true;
@@ -43,6 +43,9 @@ struct hypervisor_x86 {
 
 	/* pin current vcpu to specified physical cpu (run rarely) */
 	void (*pin_vcpu)(int);
+
+	/* called during init_mem_mapping() to setup early mappings. */
+	void (*init_mem_mapping)(void);
 };
 
 extern const struct hypervisor_x86 *x86_hyper;
@@ -57,8 +60,15 @@ extern const struct hypervisor_x86 x86_hyper_kvm;
 extern void init_hypervisor_platform(void);
 extern bool hypervisor_x2apic_available(void);
 extern void hypervisor_pin_vcpu(int cpu);
+
+static inline void hypervisor_init_mem_mapping(void)
+{
+	if (x86_hyper && x86_hyper->init_mem_mapping)
+		x86_hyper->init_mem_mapping();
+}
 #else
 static inline void init_hypervisor_platform(void) { }
 static inline bool hypervisor_x2apic_available(void) { return false; }
+static inline void hypervisor_init_mem_mapping(void) { }
 #endif /* CONFIG_HYPERVISOR_GUEST */
 #endif /* _ASM_X86_HYPERVISOR_H */
@@ -18,6 +18,7 @@
 #include <asm/dma.h>		/* for MAX_DMA_PFN */
 #include <asm/microcode.h>
 #include <asm/kaslr.h>
+#include <asm/hypervisor.h>
 
 /*
  * We need to define the tracepoints somewhere, and tlb.c
@@ -636,6 +637,8 @@ void __init init_mem_mapping(void)
 	load_cr3(swapper_pg_dir);
 	__flush_tlb_all();
 
+	hypervisor_init_mem_mapping();
+
 	early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
 }
 
@@ -12,6 +12,7 @@
 #include <asm/setup.h>
 #include <asm/hypervisor.h>
 #include <asm/e820/api.h>
+#include <asm/early_ioremap.h>
 
 #include <asm/xen/cpuid.h>
 #include <asm/xen/hypervisor.h>
@@ -21,38 +22,50 @@
 #include "mmu.h"
 #include "smp.h"
 
-void __ref xen_hvm_init_shared_info(void)
+static unsigned long shared_info_pfn;
+
+void xen_hvm_init_shared_info(void)
 {
 	struct xen_add_to_physmap xatp;
-	u64 pa;
-
-	if (HYPERVISOR_shared_info == &xen_dummy_shared_info) {
-		/*
-		 * Search for a free page starting at 4kB physical address.
-		 * Low memory is preferred to avoid an EPT large page split up
-		 * by the mapping.
-		 * Starting below X86_RESERVE_LOW (usually 64kB) is fine as
-		 * the BIOS used for HVM guests is well behaved and won't
-		 * clobber memory other than the first 4kB.
-		 */
-		for (pa = PAGE_SIZE;
-		     !e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) ||
-		     memblock_is_reserved(pa);
-		     pa += PAGE_SIZE)
-			;
-
-		memblock_reserve(pa, PAGE_SIZE);
-		HYPERVISOR_shared_info = __va(pa);
-	}
 
 	xatp.domid = DOMID_SELF;
 	xatp.idx = 0;
 	xatp.space = XENMAPSPACE_shared_info;
-	xatp.gpfn = virt_to_pfn(HYPERVISOR_shared_info);
+	xatp.gpfn = shared_info_pfn;
 	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
 		BUG();
 }
 
+static void __init reserve_shared_info(void)
+{
+	u64 pa;
+
+	/*
+	 * Search for a free page starting at 4kB physical address.
+	 * Low memory is preferred to avoid an EPT large page split up
+	 * by the mapping.
+	 * Starting below X86_RESERVE_LOW (usually 64kB) is fine as
+	 * the BIOS used for HVM guests is well behaved and won't
+	 * clobber memory other than the first 4kB.
+	 */
+	for (pa = PAGE_SIZE;
+	     !e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) ||
+	     memblock_is_reserved(pa);
+	     pa += PAGE_SIZE)
+		;
+
+	shared_info_pfn = PHYS_PFN(pa);
+
+	memblock_reserve(pa, PAGE_SIZE);
+	HYPERVISOR_shared_info = early_memremap(pa, PAGE_SIZE);
+}
+
+static void __init xen_hvm_init_mem_mapping(void)
+{
+	early_memunmap(HYPERVISOR_shared_info, PAGE_SIZE);
+	HYPERVISOR_shared_info = __va(PFN_PHYS(shared_info_pfn));
+}
+
 static void __init init_hvm_pv_info(void)
 {
 	int major, minor;
@@ -153,6 +166,7 @@ static void __init xen_hvm_guest_init(void)
 
 	init_hvm_pv_info();
 
+	reserve_shared_info();
 	xen_hvm_init_shared_info();
 
 	/*
@@ -218,5 +232,6 @@ const struct hypervisor_x86 x86_hyper_xen_hvm = {
 	.init_platform          = xen_hvm_guest_init,
 	.pin_vcpu               = xen_pin_vcpu,
 	.x2apic_available       = xen_x2apic_para_available,
+	.init_mem_mapping	= xen_hvm_init_mem_mapping,
 };
 EXPORT_SYMBOL(x86_hyper_xen_hvm);
|
@ -387,9 +387,11 @@ static void bio_integrity_verify_fn(struct work_struct *work)
|
||||||
*/
|
*/
|
||||||
bool __bio_integrity_endio(struct bio *bio)
|
bool __bio_integrity_endio(struct bio *bio)
|
||||||
{
|
{
|
||||||
if (bio_op(bio) == REQ_OP_READ && !bio->bi_status) {
|
struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
|
||||||
struct bio_integrity_payload *bip = bio_integrity(bio);
|
struct bio_integrity_payload *bip = bio_integrity(bio);
|
||||||
|
|
||||||
|
if (bio_op(bio) == REQ_OP_READ && !bio->bi_status &&
|
||||||
|
(bip->bip_flags & BIP_BLOCK_INTEGRITY) && bi->profile->verify_fn) {
|
||||||
INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
|
INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
|
||||||
queue_work(kintegrityd_wq, &bip->bip_work);
|
queue_work(kintegrityd_wq, &bip->bip_work);
|
||||||
return false;
|
return false;
|
||||||
|
|
|
@@ -684,8 +684,8 @@ EXPORT_SYMBOL(blk_mq_kick_requeue_list);
 void blk_mq_delay_kick_requeue_list(struct request_queue *q,
 				    unsigned long msecs)
 {
-	kblockd_schedule_delayed_work(&q->requeue_work,
-				      msecs_to_jiffies(msecs));
+	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
+				    msecs_to_jiffies(msecs));
 }
 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
 
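The switch above matters because of standard workqueue semantics: queueing a delayed work that is already pending is a no-op, so a second kick with a shorter delay could previously be lost, while a mod_delayed_work()-style call re-arms the pending timer to the new expiry instead.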
@@ -16,6 +16,16 @@
 #include <linux/kernel.h>
 #include <linux/serial_core.h>
 
+/*
+ * Erratum 44 for QDF2432v1 and QDF2400v1 SoCs describes the BUSY bit as
+ * occasionally getting stuck as 1. To avoid the potential for a hang, check
+ * TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART
+ * implementations, so only do so if an affected platform is detected in
+ * parse_spcr().
+ */
+bool qdf2400_e44_present;
+EXPORT_SYMBOL(qdf2400_e44_present);
+
 /*
  * Some Qualcomm Datacenter Technologies SoCs have a defective UART BUSY bit.
  * Detect them by examining the OEM fields in the SPCR header, similiar to PCI
@@ -147,8 +157,30 @@ int __init parse_spcr(bool earlycon)
 		goto done;
 	}
 
-	if (qdf2400_erratum_44_present(&table->header))
-		uart = "qdf2400_e44";
+	/*
+	 * If the E44 erratum is required, then we need to tell the pl011
+	 * driver to implement the work-around.
+	 *
+	 * The global variable is used by the probe function when it
+	 * creates the UARTs, whether or not they're used as a console.
+	 *
+	 * If the user specifies "traditional" earlycon, the qdf2400_e44
+	 * console name matches the EARLYCON_DECLARE() statement, and
+	 * SPCR is not used.  Parameter "earlycon" is false.
+	 *
+	 * If the user specifies "SPCR" earlycon, then we need to update
+	 * the console name so that it also says "qdf2400_e44".  Parameter
+	 * "earlycon" is true.
+	 *
+	 * For consistency, if we change the console name, then we do it
+	 * for everyone, not just earlycon.
+	 */
+	if (qdf2400_erratum_44_present(&table->header)) {
+		qdf2400_e44_present = true;
+		if (earlycon)
+			uart = "qdf2400_e44";
+	}
 
 	if (xgene_8250_erratum_present(table))
 		iotype = "mmio32";
@@ -30,7 +30,6 @@
 #include <linux/syscore_ops.h>
 #include <linux/reboot.h>
 #include <linux/security.h>
-#include <linux/swait.h>
 
 #include <generated/utsrelease.h>
 
@@ -112,13 +111,13 @@ static inline long firmware_loading_timeout(void)
  * state of the firmware loading.
  */
 struct fw_state {
-	struct swait_queue_head wq;
+	struct completion completion;
 	enum fw_status status;
 };
 
 static void fw_state_init(struct fw_state *fw_st)
 {
-	init_swait_queue_head(&fw_st->wq);
+	init_completion(&fw_st->completion);
 	fw_st->status = FW_STATUS_UNKNOWN;
 }
 
@@ -131,9 +130,7 @@ static int __fw_state_wait_common(struct fw_state *fw_st, long timeout)
 {
 	long ret;
 
-	ret = swait_event_interruptible_timeout(fw_st->wq,
-				__fw_state_is_done(READ_ONCE(fw_st->status)),
-				timeout);
+	ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout);
 	if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
 		return -ENOENT;
 	if (!ret)
@@ -148,35 +145,34 @@ static void __fw_state_set(struct fw_state *fw_st,
 	WRITE_ONCE(fw_st->status, status);
 
 	if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
-		swake_up(&fw_st->wq);
+		complete_all(&fw_st->completion);
 }
 
 #define fw_state_start(fw_st)					\
 	__fw_state_set(fw_st, FW_STATUS_LOADING)
 #define fw_state_done(fw_st)					\
 	__fw_state_set(fw_st, FW_STATUS_DONE)
+#define fw_state_aborted(fw_st)					\
+	__fw_state_set(fw_st, FW_STATUS_ABORTED)
 #define fw_state_wait(fw_st)					\
 	__fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT)
 
-#ifndef CONFIG_FW_LOADER_USER_HELPER
-
-#define fw_state_is_aborted(fw_st)	false
-
-#else /* CONFIG_FW_LOADER_USER_HELPER */
-
 static int __fw_state_check(struct fw_state *fw_st, enum fw_status status)
 {
 	return fw_st->status == status;
 }
 
+#define fw_state_is_aborted(fw_st)				\
+	__fw_state_check(fw_st, FW_STATUS_ABORTED)
+
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+
 #define fw_state_aborted(fw_st)					\
 	__fw_state_set(fw_st, FW_STATUS_ABORTED)
 #define fw_state_is_done(fw_st)					\
 	__fw_state_check(fw_st, FW_STATUS_DONE)
 #define fw_state_is_loading(fw_st)				\
 	__fw_state_check(fw_st, FW_STATUS_LOADING)
-#define fw_state_is_aborted(fw_st)				\
-	__fw_state_check(fw_st, FW_STATUS_ABORTED)
 #define fw_state_wait_timeout(fw_st, timeout)			\
 	__fw_state_wait_common(fw_st, timeout)
 
@@ -1200,6 +1196,28 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
 	return 1; /* need to load */
 }
 
+/*
+ * Batched requests need only one wake, we need to do this step last due to the
+ * fallback mechanism. The buf is protected with kref_get(), and it won't be
+ * released until the last user calls release_firmware().
+ *
+ * Failed batched requests are possible as well, in such cases we just share
+ * the struct firmware_buf and won't release it until all requests are woken
+ * and have gone through this same path.
+ */
+static void fw_abort_batch_reqs(struct firmware *fw)
+{
+	struct firmware_buf *buf;
+
+	/* Loaded directly? */
+	if (!fw || !fw->priv)
+		return;
+
+	buf = fw->priv;
+	if (!fw_state_is_aborted(&buf->fw_st))
+		fw_state_aborted(&buf->fw_st);
+}
+
 /* called from request_firmware() and request_firmware_work_func() */
 static int
 _request_firmware(const struct firmware **firmware_p, const char *name,
@@ -1243,6 +1261,7 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
 
  out:
 	if (ret < 0) {
+		fw_abort_batch_reqs(fw);
 		release_firmware(fw);
 		fw = NULL;
 	}
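The swait -> completion conversion above matters because swake_up() wakes at most one waiter, while batched firmware requests can have many sleepers on the same fw_state; complete_all() releases every current waiter and lets late arrivals pass straight through. A minimal sketch of the resulting pattern, with hypothetical my_* names:

#include <linux/completion.h>

struct my_state {
	struct completion done;
};

static void my_state_init(struct my_state *st)
{
	init_completion(&st->done);
}

/* One terminal event must release *every* waiter. */
static void my_state_finish(struct my_state *st)
{
	complete_all(&st->done);
}

/* Mirrors __fw_state_wait_common(): killable wait with a timeout. */
static long my_state_wait(struct my_state *st, long timeout)
{
	return wait_for_completion_killable_timeout(&st->done, timeout);
}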
@@ -308,7 +308,7 @@ static ssize_t comp_algorithm_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t len)
 {
 	struct zram *zram = dev_to_zram(dev);
-	char compressor[CRYPTO_MAX_ALG_NAME];
+	char compressor[ARRAY_SIZE(zram->compressor)];
 	size_t sz;
 
 	strlcpy(compressor, buf, sizeof(compressor));
@@ -327,7 +327,7 @@ static ssize_t comp_algorithm_store(struct device *dev,
 		return -EBUSY;
 	}
 
-	strlcpy(zram->compressor, compressor, sizeof(compressor));
+	strcpy(zram->compressor, compressor);
 	up_write(&zram->init_lock);
 	return len;
 }
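The zram change above sizes the staging buffer from the destination field instead of CRYPTO_MAX_ALG_NAME, so the final copy can never overflow by construction. A standalone illustration of the idiom (userspace C, snprintf standing in for strlcpy):

#include <stdio.h>
#include <string.h>

struct zdev { char compressor[16]; };	/* stand-in for struct zram */

static void set_name(struct zdev *z, const char *buf)
{
	char tmp[sizeof(z->compressor)];	/* sized to the destination */

	snprintf(tmp, sizeof(tmp), "%s", buf);	/* truncating copy */
	strcpy(z->compressor, tmp);		/* safe: same size as tmp */
}

int main(void)
{
	struct zdev z;

	set_name(&z, "a-very-long-compressor-name");
	printf("%s\n", z.compressor);	/* prints the truncated name */
	return 0;
}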
@@ -235,6 +235,7 @@ static inline int validate_dt_prop_sizes(const char *prop1, int prop1_len,
 	return -1;
 }
 
+extern u32 pnv_get_supported_cpuidle_states(void);
 static int powernv_add_idle_states(void)
 {
 	struct device_node *power_mgt;
@@ -248,6 +249,8 @@ static int powernv_add_idle_states(void)
 	const char *names[CPUIDLE_STATE_MAX];
 	u32 has_stop_states = 0;
 	int i, rc;
+	u32 supported_flags = pnv_get_supported_cpuidle_states();
+
 
 	/* Currently we have snooze statically defined */
 
@@ -362,6 +365,13 @@ static int powernv_add_idle_states(void)
 	for (i = 0; i < dt_idle_states; i++) {
 		unsigned int exit_latency, target_residency;
 		bool stops_timebase = false;
+
+		/*
+		 * Skip the platform idle state whose flag isn't in
+		 * the supported_cpuidle_states flag mask.
+		 */
+		if ((flags[i] & supported_flags) != flags[i])
+			continue;
 		/*
 		 * If an idle state has exit latency beyond
 		 * POWERNV_THRESHOLD_LATENCY_NS then don't use it
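The (flags[i] & supported_flags) != flags[i] test above is a subset check: a platform idle state is usable only if every flag bit it carries is also present in the supported mask. A tiny standalone demonstration:

#include <stdio.h>

int main(void)
{
	unsigned int supported = 0x0f;
	unsigned int state_a = 0x05;	/* subset of supported -> keep */
	unsigned int state_b = 0x15;	/* bit 4 not supported  -> skip */

	printf("a: %s\n", (state_a & supported) == state_a ? "keep" : "skip");
	printf("b: %s\n", (state_b & supported) == state_b ? "keep" : "skip");
	return 0;
}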
@@ -1073,7 +1073,7 @@ static int aead_perform(struct aead_request *req, int encrypt,
 	req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
 			&crypt->icv_rev_aes);
 	if (unlikely(!req_ctx->hmac_virt))
-		goto free_buf_src;
+		goto free_buf_dst;
 	if (!encrypt) {
 		scatterwalk_map_and_copy(req_ctx->hmac_virt,
 			req->src, cryptlen, authsize, 0);
@@ -1088,10 +1088,10 @@ static int aead_perform(struct aead_request *req, int encrypt,
 	BUG_ON(qmgr_stat_overflow(SEND_QID));
 	return -EINPROGRESS;
 
-free_buf_src:
-	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
 free_buf_dst:
 	free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
+free_buf_src:
+	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
 	crypt->ctl_flags = CTL_FLAG_UNUSED;
 	return -ENOMEM;
 }
@@ -304,7 +304,7 @@ static int sync_file_release(struct inode *inode, struct file *file)
 {
 	struct sync_file *sync_file = file->private_data;
 
-	if (test_bit(POLL_ENABLED, &sync_file->fence->flags))
+	if (test_bit(POLL_ENABLED, &sync_file->flags))
 		dma_fence_remove_callback(sync_file->fence, &sync_file->cb);
 	dma_fence_put(sync_file->fence);
 	kfree(sync_file);
@@ -318,7 +318,8 @@ static unsigned int sync_file_poll(struct file *file, poll_table *wait)
 
 	poll_wait(file, &sync_file->wq, wait);
 
-	if (!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
+	if (list_empty(&sync_file->cb.node) &&
+	    !test_and_set_bit(POLL_ENABLED, &sync_file->flags)) {
 		if (dma_fence_add_callback(sync_file->fence, &sync_file->cb,
 					   fence_check_cb_func) < 0)
 			wake_up_all(&sync_file->wq);
@@ -1255,7 +1255,7 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
 
 	/* port@2 is the output port */
 	ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &tc->panel, NULL);
-	if (ret)
+	if (ret && ret != -ENODEV)
 		return ret;
 
 	/* Shut down GPIO is optional */
@@ -270,8 +270,8 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
 		if (ret)
 			return ret;
 
-		if (r->reloc_offset >= bo->obj->base.size - sizeof(*ptr)) {
-			DRM_ERROR("relocation %u outside object", i);
+		if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) {
+			DRM_ERROR("relocation %u outside object\n", i);
 			return -EINVAL;
 		}
 
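The etnaviv fix above is a classic off-by-one in a bounds check: an object of size S can hold a pointer-sized word at offsets 0 through S - sizeof(word) inclusive, so the last valid offset must be accepted. A standalone demonstration:

#include <stdio.h>

int main(void)
{
	unsigned long size = 4096, word = 8;
	unsigned long off = size - word;	/* 4088: last valid offset */

	/* old check (>=) wrongly rejects it; new check (>) accepts it */
	printf("off=%lu  '>=' rejects: %d  '>' rejects: %d\n",
	       off, off >= size - word, off > size - word);
	return 0;
}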
@@ -145,13 +145,19 @@ static struct drm_framebuffer *
 exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 		      const struct drm_mode_fb_cmd2 *mode_cmd)
 {
+	const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd);
 	struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
 	struct drm_gem_object *obj;
 	struct drm_framebuffer *fb;
 	int i;
 	int ret;
 
-	for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
+	for (i = 0; i < info->num_planes; i++) {
+		unsigned int height = (i == 0) ? mode_cmd->height :
+				     DIV_ROUND_UP(mode_cmd->height, info->vsub);
+		unsigned long size = height * mode_cmd->pitches[i] +
+				    mode_cmd->offsets[i];
+
 		obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
 		if (!obj) {
 			DRM_ERROR("failed to lookup gem object\n");
@@ -160,6 +166,12 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 		}
 
 		exynos_gem[i] = to_exynos_gem(obj);
+
+		if (size > exynos_gem[i]->size) {
+			i++;
+			ret = -EINVAL;
+			goto err;
+		}
 	}
 
 	fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i);
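The size check added to exynos above accounts for chroma subsampling: for plane i > 0 the height is divided by the vertical subsampling factor before multiplying by the pitch. A worked example for an NV12-like layout (vsub = 2), with made-up numbers:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int height = 1080, pitch = 1920, offset = 0, vsub = 2;
	unsigned long luma   = (unsigned long)height * pitch + offset;
	unsigned long chroma =
		(unsigned long)DIV_ROUND_UP(height, vsub) * pitch + offset;

	/* the GEM object backing each plane must be at least this large */
	printf("luma needs %lu bytes, chroma needs %lu bytes\n", luma, chroma);
	return 0;
}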
@@ -46,6 +46,8 @@
 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
 		((a)->lrca == (b)->lrca))
 
+static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask);
+
 static int context_switch_events[] = {
 	[RCS] = RCS_AS_CONTEXT_SWITCH,
 	[BCS] = BCS_AS_CONTEXT_SWITCH,
@@ -499,10 +501,10 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
-	struct intel_vgpu_execlist *execlist =
-		&vgpu->execlist[workload->ring_id];
+	int ring_id = workload->ring_id;
+	struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
 	struct intel_vgpu_workload *next_workload;
-	struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next;
+	struct list_head *next = workload_q_head(vgpu, ring_id)->next;
 	bool lite_restore = false;
 	int ret;
 
@@ -512,10 +514,25 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 	release_shadow_batch_buffer(workload);
 	release_shadow_wa_ctx(&workload->wa_ctx);
 
-	if (workload->status || vgpu->resetting)
+	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
+		/* if workload->status is not successful means HW GPU
+		 * has occurred GPU hang or something wrong with i915/GVT,
+		 * and GVT won't inject context switch interrupt to guest.
+		 * So this error is a vGPU hang actually to the guest.
+		 * According to this we should emunlate a vGPU hang. If
+		 * there are pending workloads which are already submitted
+		 * from guest, we should clean them up like HW GPU does.
+		 *
+		 * if it is in middle of engine resetting, the pending
+		 * workloads won't be submitted to HW GPU and will be
+		 * cleaned up during the resetting process later, so doing
+		 * the workload clean up here doesn't have any impact.
+		 **/
+		clean_workloads(vgpu, ENGINE_MASK(ring_id));
 		goto out;
+	}
 
-	if (!list_empty(workload_q_head(vgpu, workload->ring_id))) {
+	if (!list_empty(workload_q_head(vgpu, ring_id))) {
 		struct execlist_ctx_descriptor_format *this_desc, *next_desc;
 
 		next_workload = container_of(next,
@@ -72,11 +72,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
 	struct intel_gvt_device_info *info = &gvt->device_info;
 	struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
 	struct intel_gvt_mmio_info *e;
+	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+	int num = gvt->mmio.num_mmio_block;
 	struct gvt_firmware_header *h;
 	void *firmware;
 	void *p;
 	unsigned long size, crc32_start;
-	int i;
+	int i, j;
 	int ret;
 
 	size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
@@ -105,6 +107,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
 	hash_for_each(gvt->mmio.mmio_info_table, i, e, node)
 		*(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset));
 
+	for (i = 0; i < num; i++, block++) {
+		for (j = 0; j < block->size; j += 4)
+			*(u32 *)(p + INTEL_GVT_MMIO_OFFSET(block->offset) + j) =
+				I915_READ_NOTRACE(_MMIO(INTEL_GVT_MMIO_OFFSET(
+					block->offset) + j));
+	}
+
 	memcpy(gvt->firmware.mmio, p, info->mmio_size);
 
 	crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
@@ -149,7 +149,7 @@ struct intel_vgpu {
 	bool active;
 	bool pv_notified;
 	bool failsafe;
-	bool resetting;
+	unsigned int resetting_eng;
 	void *sched_data;
 	struct vgpu_sched_ctl sched_ctl;
 
@@ -195,6 +195,15 @@ struct intel_gvt_fence {
 	unsigned long vgpu_allocated_fence_num;
 };
 
+/* Special MMIO blocks. */
+struct gvt_mmio_block {
+	unsigned int device;
+	i915_reg_t   offset;
+	unsigned int size;
+	gvt_mmio_func read;
+	gvt_mmio_func write;
+};
+
 #define INTEL_GVT_MMIO_HASH_BITS 11
 
 struct intel_gvt_mmio {
@@ -214,6 +223,9 @@ struct intel_gvt_mmio {
 /* This reg could be accessed by unaligned address */
 #define F_UNALIGN	(1 << 6)
 
+	struct gvt_mmio_block *mmio_block;
+	unsigned int num_mmio_block;
+
 	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
 	unsigned int num_tracked_mmio;
 };
@@ -2857,31 +2857,15 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	return 0;
 }
 
-/* Special MMIO blocks. */
-static struct gvt_mmio_block {
-	unsigned int device;
-	i915_reg_t   offset;
-	unsigned int size;
-	gvt_mmio_func read;
-	gvt_mmio_func write;
-} gvt_mmio_blocks[] = {
-	{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
-	{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
-	{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
-		pvinfo_mmio_read, pvinfo_mmio_write},
-	{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
-	{D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
-	{D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
-};
-
 static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
 					      unsigned int offset)
 {
 	unsigned long device = intel_gvt_get_device_type(gvt);
-	struct gvt_mmio_block *block = gvt_mmio_blocks;
+	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+	int num = gvt->mmio.num_mmio_block;
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(gvt_mmio_blocks); i++, block++) {
+	for (i = 0; i < num; i++, block++) {
 		if (!(device & block->device))
 			continue;
 		if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) &&
@@ -2912,6 +2896,17 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
 	gvt->mmio.mmio_attribute = NULL;
 }
 
+/* Special MMIO blocks. */
+static struct gvt_mmio_block mmio_blocks[] = {
+	{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
+	{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
+	{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
+		pvinfo_mmio_read, pvinfo_mmio_write},
+	{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
+	{D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
+	{D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
+};
+
 /**
  * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
  * @gvt: GVT device
@@ -2951,6 +2946,9 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
 		goto err;
 	}
 
+	gvt->mmio.mmio_block = mmio_blocks;
+	gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);
+
 	gvt_dbg_mmio("traced %u virtual mmio registers\n",
 		     gvt->mmio.num_tracked_mmio);
 	return 0;
@@ -3030,7 +3028,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
 	gvt_mmio_func func;
 	int ret;
 
-	if (WARN_ON(bytes > 4))
+	if (WARN_ON(bytes > 8))
 		return -EINVAL;
 
 	/*
@@ -432,7 +432,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
 		i915_gem_request_put(fetch_and_zero(&workload->req));
 
-		if (!workload->status && !vgpu->resetting) {
+		if (!workload->status && !(vgpu->resetting_eng &
+					   ENGINE_MASK(ring_id))) {
 			update_guest_context(workload);
 
 			for_each_set_bit(event, workload->pending_events,
@@ -480,11 +480,13 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+	unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
 
 	gvt_dbg_core("------------------------------------------\n");
 	gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
 		     vgpu->id, dmlr, engine_mask);
-	vgpu->resetting = true;
+
+	vgpu->resetting_eng = resetting_eng;
 
 	intel_vgpu_stop_schedule(vgpu);
 	/*
@@ -497,7 +499,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 		mutex_lock(&gvt->lock);
 	}
 
-	intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);
+	intel_vgpu_reset_execlist(vgpu, resetting_eng);
 
 	/* full GPU reset or device model level reset */
 	if (engine_mask == ALL_ENGINES || dmlr) {
@@ -520,7 +522,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 		}
 	}
 
-	vgpu->resetting = false;
+	vgpu->resetting_eng = 0;
 	gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
 	gvt_dbg_core("------------------------------------------\n");
 }
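The bool -> mask conversion running through the GVT hunks above lets each per-engine path test only its own reset bit instead of a global flag. A standalone sketch of the bit test (ENGINE_MASK modeled after the i915 macro):

#include <stdio.h>

#define ENGINE_MASK(id)	(1u << (id))

int main(void)
{
	unsigned int resetting_eng = ENGINE_MASK(2);	/* engine 2 resetting */

	for (int ring_id = 0; ring_id < 4; ring_id++)
		printf("ring %d resetting: %d\n",
		       ring_id, !!(resetting_eng & ENGINE_MASK(ring_id)));
	return 0;
}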
@@ -43,16 +43,21 @@ static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
 		return true;
 
 	case MUTEX_TRYLOCK_FAILED:
+		*unlock = false;
+		preempt_disable();
 		do {
 			cpu_relax();
 			if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
-	case MUTEX_TRYLOCK_SUCCESS:
 				*unlock = true;
-				return true;
+				break;
 			}
 		} while (!need_resched());
+		preempt_enable();
+		return *unlock;
 
-		return false;
+	case MUTEX_TRYLOCK_SUCCESS:
+		*unlock = true;
+		return true;
 	}
 
 	BUG();
@@ -1601,11 +1601,11 @@ static int gen8_emit_oa_config(struct drm_i915_gem_request *req)
 	u32 *cs;
 	int i;
 
-	cs = intel_ring_begin(req, n_flex_regs * 2 + 4);
+	cs = intel_ring_begin(req, ARRAY_SIZE(flex_mmio) * 2 + 4);
 	if (IS_ERR(cs))
 		return PTR_ERR(cs);
 
-	*cs++ = MI_LOAD_REGISTER_IMM(n_flex_regs + 1);
+	*cs++ = MI_LOAD_REGISTER_IMM(ARRAY_SIZE(flex_mmio) + 1);
 
 	*cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
 	*cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
@@ -398,6 +398,7 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
 	}
 
 	/* Program the max register to clamp values > 1.0. */
+	i = lut_size - 1;
 	I915_WRITE(PREC_PAL_GC_MAX(pipe, 0),
 		   drm_color_lut_extract(lut[i].red, 16));
 	I915_WRITE(PREC_PAL_GC_MAX(pipe, 1),
@@ -469,7 +469,7 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
 
 	if (i915.invert_brightness > 0 ||
 	    dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
-		return panel->backlight.max - val;
+		return panel->backlight.max - val + panel->backlight.min;
 	}
 
 	return val;
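The intel_panel fix above keeps inverted brightness inside the panel's valid range: max - val can fall below min, while max - val + min maps [min, max] onto itself. A quick numeric check:

#include <stdio.h>

int main(void)
{
	unsigned int min = 10, max = 100;

	for (unsigned int val = min; val <= max; val += 45)
		printf("val=%3u  old=%3u  new=%3u\n",
		       val, max - val, max - val + min);
	/* old: 90, 45, 0 (below min!)  new: 100, 55, 10 (stays in range) */
	return 0;
}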
@@ -5,7 +5,7 @@ config DRM_MSM
 	depends on ARCH_QCOM || (ARM && COMPILE_TEST)
 	depends on OF && COMMON_CLK
 	depends on MMU
-	select QCOM_MDT_LOADER
+	select QCOM_MDT_LOADER if ARCH_QCOM
 	select REGULATOR
 	select DRM_KMS_HELPER
 	select DRM_PANEL
@@ -15,7 +15,7 @@
 #include <linux/cpumask.h>
 #include <linux/qcom_scm.h>
 #include <linux/dma-mapping.h>
-#include <linux/of_reserved_mem.h>
+#include <linux/of_address.h>
 #include <linux/soc/qcom/mdt_loader.h>
 #include "msm_gem.h"
 #include "msm_mmu.h"
@@ -26,16 +26,34 @@ static void a5xx_dump(struct msm_gpu *gpu);
 
 #define GPU_PAS_ID 13
 
-#if IS_ENABLED(CONFIG_QCOM_MDT_LOADER)
-
 static int zap_shader_load_mdt(struct device *dev, const char *fwname)
 {
 	const struct firmware *fw;
+	struct device_node *np;
+	struct resource r;
 	phys_addr_t mem_phys;
 	ssize_t mem_size;
 	void *mem_region = NULL;
 	int ret;
 
+	if (!IS_ENABLED(CONFIG_ARCH_QCOM))
+		return -EINVAL;
+
+	np = of_get_child_by_name(dev->of_node, "zap-shader");
+	if (!np)
+		return -ENODEV;
+
+	np = of_parse_phandle(np, "memory-region", 0);
+	if (!np)
+		return -EINVAL;
+
+	ret = of_address_to_resource(np, 0, &r);
+	if (ret)
+		return ret;
+
+	mem_phys = r.start;
+	mem_size = resource_size(&r);
+
 	/* Request the MDT file for the firmware */
 	ret = request_firmware(&fw, fwname, dev);
 	if (ret) {
@@ -51,7 +69,7 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
 	}
 
 	/* Allocate memory for the firmware image */
-	mem_region = dmam_alloc_coherent(dev, mem_size, &mem_phys, GFP_KERNEL);
+	mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
 	if (!mem_region) {
 		ret = -ENOMEM;
 		goto out;
@@ -69,16 +87,13 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
 		DRM_DEV_ERROR(dev, "Unable to authorize the image\n");
 
 out:
+	if (mem_region)
+		memunmap(mem_region);
+
 	release_firmware(fw);
 
 	return ret;
 }
-#else
-static int zap_shader_load_mdt(struct device *dev, const char *fwname)
-{
-	return -ENODEV;
-}
-#endif
 
 static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 	struct msm_file_private *ctx)
@@ -117,12 +132,10 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 	gpu->funcs->flush(gpu);
 }
 
-struct a5xx_hwcg {
+static const struct {
 	u32 offset;
 	u32 value;
-};
+} a5xx_hwcg[] = {
-
-static const struct a5xx_hwcg a530_hwcg[] = {
 	{REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
 	{REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
 	{REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
@@ -217,38 +230,16 @@ static const struct a5xx_hwcg a530_hwcg[] = {
 	{REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
 };
 
-static const struct {
-	int (*test)(struct adreno_gpu *gpu);
-	const struct a5xx_hwcg *regs;
-	unsigned int count;
-} a5xx_hwcg_regs[] = {
-	{ adreno_is_a530, a530_hwcg, ARRAY_SIZE(a530_hwcg), },
-};
-
-static void _a5xx_enable_hwcg(struct msm_gpu *gpu,
-		const struct a5xx_hwcg *regs, unsigned int count)
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
 {
 	unsigned int i;
 
-	for (i = 0; i < count; i++)
-		gpu_write(gpu, regs[i].offset, regs[i].value);
+	for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
+		gpu_write(gpu, a5xx_hwcg[i].offset,
+			state ? a5xx_hwcg[i].value : 0);
 
-	gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xAAA8AA00);
-	gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, 0x182);
-}
-
-static void a5xx_enable_hwcg(struct msm_gpu *gpu)
-{
-	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-	unsigned int i;
-
-	for (i = 0; i < ARRAY_SIZE(a5xx_hwcg_regs); i++) {
-		if (a5xx_hwcg_regs[i].test(adreno_gpu)) {
-			_a5xx_enable_hwcg(gpu, a5xx_hwcg_regs[i].regs,
-				a5xx_hwcg_regs[i].count);
-			return;
-		}
-	}
+	gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
+	gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
 }
 
 static int a5xx_me_init(struct msm_gpu *gpu)
@@ -377,45 +368,6 @@ static int a5xx_zap_shader_resume(struct msm_gpu *gpu)
 	return ret;
 }
 
-/* Set up a child device to "own" the zap shader */
-static int a5xx_zap_shader_dev_init(struct device *parent, struct device *dev)
-{
-	struct device_node *node;
-	int ret;
-
-	if (dev->parent)
-		return 0;
-
-	/* Find the sub-node for the zap shader */
-	node = of_get_child_by_name(parent->of_node, "zap-shader");
-	if (!node) {
-		DRM_DEV_ERROR(parent, "zap-shader not found in device tree\n");
-		return -ENODEV;
-	}
-
-	dev->parent = parent;
-	dev->of_node = node;
-	dev_set_name(dev, "adreno_zap_shader");
-
-	ret = device_register(dev);
-	if (ret) {
-		DRM_DEV_ERROR(parent, "Couldn't register zap shader device\n");
-		goto out;
-	}
-
-	ret = of_reserved_mem_device_init(dev);
-	if (ret) {
-		DRM_DEV_ERROR(parent, "Unable to set up the reserved memory\n");
-		device_unregister(dev);
-	}
-
-out:
-	if (ret)
-		dev->parent = NULL;
-
-	return ret;
-}
-
 static int a5xx_zap_shader_init(struct msm_gpu *gpu)
 {
 	static bool loaded;
@@ -444,11 +396,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu)
 		return -ENODEV;
 	}
 
-	ret = a5xx_zap_shader_dev_init(&pdev->dev, &a5xx_gpu->zap_dev);
-
-	if (!ret)
-		ret = zap_shader_load_mdt(&a5xx_gpu->zap_dev,
-			adreno_gpu->info->zapfw);
+	ret = zap_shader_load_mdt(&pdev->dev, adreno_gpu->info->zapfw);
 
 	loaded = !ret;
 
@@ -545,7 +493,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
 	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
 
 	/* Enable HWCG */
-	a5xx_enable_hwcg(gpu);
+	a5xx_set_hwcg(gpu, true);
 
 	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
 
@@ -691,9 +639,6 @@ static void a5xx_destroy(struct msm_gpu *gpu)
 
 	DBG("%s", gpu->name);
 
-	if (a5xx_gpu->zap_dev.parent)
-		device_unregister(&a5xx_gpu->zap_dev);
-
 	if (a5xx_gpu->pm4_bo) {
 		if (a5xx_gpu->pm4_iova)
 			msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
@@ -920,31 +865,30 @@ static const u32 a5xx_registers[] = {
 	0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
 	0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
 	0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
-	0x04E0, 0x0533, 0x0540, 0x0555, 0xF400, 0xF400, 0xF800, 0xF807,
-	0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0,
-	0x0B00, 0x0B12, 0x0B15, 0x0B28, 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD,
-	0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, 0x0C60, 0x0C61, 0x0C80, 0x0C82,
-	0x0C84, 0x0C85, 0x0C90, 0x0C98, 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2,
-	0x2180, 0x2185, 0x2580, 0x2585, 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7,
-	0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8,
-	0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, 0x2100, 0x211E, 0x2140, 0x2145,
-	0x2500, 0x251E, 0x2540, 0x2545, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
-	0x0D30, 0x0D30, 0x20C0, 0x20C0, 0x24C0, 0x24C0, 0x0E40, 0x0E43,
-	0x0E4A, 0x0E4A, 0x0E50, 0x0E57, 0x0E60, 0x0E7C, 0x0E80, 0x0E8E,
-	0x0E90, 0x0E96, 0x0EA0, 0x0EA8, 0x0EB0, 0x0EB2, 0xE140, 0xE147,
-	0xE150, 0xE187, 0xE1A0, 0xE1A9, 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7,
-	0xE1D0, 0xE1D1, 0xE200, 0xE201, 0xE210, 0xE21C, 0xE240, 0xE268,
-	0xE000, 0xE006, 0xE010, 0xE09A, 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB,
-	0xE100, 0xE105, 0xE380, 0xE38F, 0xE3B0, 0xE3B0, 0xE400, 0xE405,
-	0xE408, 0xE4E9, 0xE4F0, 0xE4F0, 0xE280, 0xE280, 0xE282, 0xE2A3,
-	0xE2A5, 0xE2C2, 0xE940, 0xE947, 0xE950, 0xE987, 0xE9A0, 0xE9A9,
-	0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, 0xE9D0, 0xE9D1, 0xEA00, 0xEA01,
-	0xEA10, 0xEA1C, 0xEA40, 0xEA68, 0xE800, 0xE806, 0xE810, 0xE89A,
-	0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, 0xE900, 0xE905, 0xEB80, 0xEB8F,
-	0xEBB0, 0xEBB0, 0xEC00, 0xEC05, 0xEC08, 0xECE9, 0xECF0, 0xECF0,
-	0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2, 0xA800, 0xA8FF,
-	0xAC60, 0xAC60, 0xB000, 0xB97F, 0xB9A0, 0xB9BF,
-	~0
+	0x04E0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081A, 0x081F, 0x0841,
+	0x0860, 0x0860, 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0x0B28,
+	0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53,
+	0x0C60, 0x0C61, 0x0C80, 0x0C82, 0x0C84, 0x0C85, 0x0C90, 0x0C98,
+	0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 0x2180, 0x2185, 0x2580, 0x2585,
+	0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8,
+	0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E,
+	0x2100, 0x211E, 0x2140, 0x2145, 0x2500, 0x251E, 0x2540, 0x2545,
+	0x0D10, 0x0D17, 0x0D20, 0x0D23, 0x0D30, 0x0D30, 0x20C0, 0x20C0,
+	0x24C0, 0x24C0, 0x0E40, 0x0E43, 0x0E4A, 0x0E4A, 0x0E50, 0x0E57,
+	0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0x0EA0, 0x0EA8,
+	0x0EB0, 0x0EB2, 0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9,
+	0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 0xE1D0, 0xE1D1, 0xE200, 0xE201,
+	0xE210, 0xE21C, 0xE240, 0xE268, 0xE000, 0xE006, 0xE010, 0xE09A,
+	0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 0xE100, 0xE105, 0xE380, 0xE38F,
+	0xE3B0, 0xE3B0, 0xE400, 0xE405, 0xE408, 0xE4E9, 0xE4F0, 0xE4F0,
+	0xE280, 0xE280, 0xE282, 0xE2A3, 0xE2A5, 0xE2C2, 0xE940, 0xE947,
+	0xE950, 0xE987, 0xE9A0, 0xE9A9, 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7,
+	0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 0xEA10, 0xEA1C, 0xEA40, 0xEA68,
+	0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB,
+	0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05,
+	0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3,
+	0xEAA5, 0xEAC2, 0xA800, 0xA8FF, 0xAC60, 0xAC60, 0xB000, 0xB97F,
+	0xB9A0, 0xB9BF, ~0
 };
 
 static void a5xx_dump(struct msm_gpu *gpu)
@@ -1020,7 +964,14 @@ static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
 {
 	seq_printf(m, "status:   %08x\n",
 		gpu_read(gpu, REG_A5XX_RBBM_STATUS));
+
+	/*
+	 * Temporarily disable hardware clock gating before going into
+	 * adreno_show to avoid issues while reading the registers
+	 */
+	a5xx_set_hwcg(gpu, false);
 	adreno_show(gpu, m);
+	a5xx_set_hwcg(gpu, true);
 }
 #endif
 
@@ -36,8 +36,6 @@ struct a5xx_gpu {
 	uint32_t gpmu_dwords;
 
 	uint32_t lm_leakage;
-
-	struct device zap_dev;
 };
 
 #define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
@@ -59,5 +57,6 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
 }
 
 bool a5xx_idle(struct msm_gpu *gpu);
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
 
 #endif /* __A5XX_GPU_H__ */
@@ -48,8 +48,15 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
 		*value = adreno_gpu->base.fast_rate;
 		return 0;
 	case MSM_PARAM_TIMESTAMP:
-		if (adreno_gpu->funcs->get_timestamp)
-			return adreno_gpu->funcs->get_timestamp(gpu, value);
+		if (adreno_gpu->funcs->get_timestamp) {
+			int ret;
+
+			pm_runtime_get_sync(&gpu->pdev->dev);
+			ret = adreno_gpu->funcs->get_timestamp(gpu, value);
+			pm_runtime_put_autosuspend(&gpu->pdev->dev);
+
+			return ret;
+		}
 		return -EINVAL;
 	default:
 		DBG("%s: invalid param: %u", gpu->name, param);
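The adreno hunk above brackets the timestamp read with runtime-PM calls so the GPU is guaranteed powered for the register access. A minimal sketch of the pattern under the same assumptions; my_hw_read() is a hypothetical stand-in for the real MMIO read:

#include <linux/pm_runtime.h>

extern int my_hw_read(struct device *dev, u64 *value);	/* hypothetical */

static int my_read_counter(struct device *dev, u64 *value)
{
	int ret;

	pm_runtime_get_sync(dev);		/* power up; may sleep */
	ret = my_hw_read(dev, value);		/* device is awake here */
	pm_runtime_put_autosuspend(dev);	/* drop ref, arm autosuspend */

	return ret;
}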
@@ -2137,6 +2137,13 @@ void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
 	struct msm_dsi_phy_clk_request *clk_req)
 {
 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	int ret;
+
+	ret = dsi_calc_clk_rate(msm_host);
+	if (ret) {
+		pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
+		return;
+	}
 
 	clk_req->bitclk_rate = msm_host->byte_clk_rate * 8;
 	clk_req->escclk_rate = msm_host->esc_clk_rate;
@@ -2280,7 +2287,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
 					struct drm_display_mode *mode)
 {
 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
-	int ret;
 
 	if (msm_host->mode) {
 		drm_mode_destroy(msm_host->dev, msm_host->mode);
@@ -2293,12 +2299,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
 		return -ENOMEM;
 	}
 
-	ret = dsi_calc_clk_rate(msm_host);
-	if (ret) {
-		pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
-		return ret;
-	}
-
 	return 0;
 }
 
@@ -221,8 +221,8 @@ static void blend_setup(struct drm_crtc *crtc)
 	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
 	uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
 	unsigned long flags;
-	enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
-	enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
+	enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
+	enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
 	int i, plane_cnt = 0;
 	bool bg_alpha_enabled = false;
 	u32 mixer_op_mode = 0;
@@ -753,6 +753,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 	if (!handle) {
 		DBG("Cursor off");
 		cursor_enable = false;
+		mdp5_enable(mdp5_kms);
 		goto set_cursor;
 	}
 
@@ -776,6 +777,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 
 	get_roi(crtc, &roi_w, &roi_h);
 
+	mdp5_enable(mdp5_kms);
+
 	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
 	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
 			MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
@@ -804,6 +807,7 @@ set_cursor:
 	crtc_flush(crtc, flush_mask);
 
 end:
+	mdp5_disable(mdp5_kms);
 	if (old_bo) {
 		drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
 		/* enable vblank to complete cursor work: */
@@ -836,6 +840,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 
 	get_roi(crtc, &roi_w, &roi_h);
 
+	mdp5_enable(mdp5_kms);
+
 	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
 	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
 			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
@@ -847,6 +853,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 
 	crtc_flush(crtc, flush_mask);
 
+	mdp5_disable(mdp5_kms);
+
 	return 0;
 }
 
@@ -299,7 +299,7 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
 	struct mdp5_interface *intf = mdp5_encoder->intf;
 
 	if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
-		mdp5_cmd_encoder_disable(encoder);
+		mdp5_cmd_encoder_enable(encoder);
 	else
 		mdp5_vid_encoder_enable(encoder);
 }
@@ -502,7 +502,7 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
 		const char *name, bool mandatory)
 {
 	struct device *dev = &pdev->dev;
-	struct clk *clk = devm_clk_get(dev, name);
+	struct clk *clk = msm_clk_get(pdev, name);
 	if (IS_ERR(clk) && mandatory) {
 		dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
 		return PTR_ERR(clk);
@@ -887,21 +887,21 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
 	}
 
 	/* mandatory clocks: */
-	ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk", true);
+	ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true);
 	if (ret)
 		goto fail;
-	ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true);
+	ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true);
 	if (ret)
 		goto fail;
-	ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true);
+	ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true);
 	if (ret)
 		goto fail;
-	ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk", true);
+	ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true);
 	if (ret)
 		goto fail;
 
 	/* optional clocks: */
-	get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk", false);
+	get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);
 
 	/* we need to set a default rate before enabling.  Set a safe
 	 * rate first, then figure out hw revision, and then set a
@@ -890,8 +890,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
 	struct mdp5_hw_pipe *right_hwpipe;
 	const struct mdp_format *format;
 	uint32_t nplanes, config = 0;
-	struct phase_step step = { 0 };
-	struct pixel_ext pe = { 0 };
+	struct phase_step step = { { 0 } };
+	struct pixel_ext pe = { { 0 } };
 	uint32_t hdecm = 0, vdecm = 0;
 	uint32_t pix_format;
 	unsigned int rotation;
@@ -383,8 +383,10 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
 		struct page **pages;
 
 		vma = add_vma(obj, aspace);
-		if (IS_ERR(vma))
-			return PTR_ERR(vma);
+		if (IS_ERR(vma)) {
+			ret = PTR_ERR(vma);
+			goto unlock;
+		}
 
 		pages = get_pages(obj);
 		if (IS_ERR(pages)) {
@@ -405,7 +407,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
 
 fail:
 	del_vma(vma);
-
+unlock:
 	mutex_unlock(&msm_obj->lock);
 	return ret;
 }
@@ -928,8 +930,12 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
 	if (use_vram) {
 		struct msm_gem_vma *vma;
 		struct page **pages;
+		struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+		mutex_lock(&msm_obj->lock);
 
 		vma = add_vma(obj, NULL);
+		mutex_unlock(&msm_obj->lock);
 		if (IS_ERR(vma)) {
 			ret = PTR_ERR(vma);
 			goto fail;
@@ -34,8 +34,8 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
 		struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds)
 {
 	struct msm_gem_submit *submit;
-	uint64_t sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
-		(nr_cmds * sizeof(submit->cmd[0]));
+	uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
+		((u64)nr_cmds * sizeof(submit->cmd[0]));
 
 	if (sz > SIZE_MAX)
 		return NULL;
@@ -451,7 +451,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	if (ret)
 		goto out;
 
-	if (!(args->fence & MSM_SUBMIT_NO_IMPLICIT)) {
+	if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) {
 		ret = submit_fence_sync(submit);
 		if (ret)
 			goto out;
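The (u64) casts above fix a 32-bit multiplication overflow: with u32 operands the product wraps before it is widened, so the later sz > SIZE_MAX check can never fire. A standalone demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t nr_bos = 0x20000000;	/* attacker-controlled count */
	uint32_t elem = 32;		/* sizeof(submit->bos[0]) stand-in */

	uint64_t wrapped = (uint64_t)(nr_bos * elem);	/* 32-bit mul wraps to 0 */
	uint64_t correct = (uint64_t)nr_bos * elem;	/* 64-bit mul: 2^34 */

	printf("wrapped=%llu correct=%llu\n",
	       (unsigned long long)wrapped, (unsigned long long)correct);
	return 0;
}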
@@ -42,7 +42,7 @@ void
 msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma, struct sg_table *sgt)
 {
-	if (!vma->iova)
+	if (!aspace || !vma->iova)
 		return;
 
 	if (aspace->mmu) {
@@ -267,6 +267,8 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
 	/* Create output path objects for each VBIOS display path. */
 	i = -1;
 	while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
+		if (ver < 0x40) /* No support for chipsets prior to NV50. */
+			break;
 		if (dcbE.type == DCB_OUTPUT_UNUSED)
 			continue;
 		if (dcbE.type == DCB_OUTPUT_EOL)
@ -500,7 +500,7 @@ static void vop_line_flag_irq_disable(struct vop *vop)
|
||||||
static int vop_enable(struct drm_crtc *crtc)
|
static int vop_enable(struct drm_crtc *crtc)
|
||||||
{
|
{
|
||||||
struct vop *vop = to_vop(crtc);
|
struct vop *vop = to_vop(crtc);
|
||||||
int ret;
|
int ret, i;
|
||||||
|
|
||||||
ret = pm_runtime_get_sync(vop->dev);
|
ret = pm_runtime_get_sync(vop->dev);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
|
@ -533,6 +533,20 @@ static int vop_enable(struct drm_crtc *crtc)
|
||||||
}
|
}
|
||||||
|
|
||||||
memcpy(vop->regs, vop->regsbak, vop->len);
|
memcpy(vop->regs, vop->regsbak, vop->len);
|
||||||
|
/*
|
||||||
|
* We need to make sure that all windows are disabled before we
|
||||||
|
* enable the crtc. Otherwise we might try to scan from a destroyed
|
||||||
|
* buffer later.
|
||||||
|
*/
|
||||||
|
for (i = 0; i < vop->data->win_size; i++) {
|
||||||
|
struct vop_win *vop_win = &vop->win[i];
|
||||||
|
const struct vop_win_data *win = vop_win->data;
|
||||||
|
|
||||||
|
spin_lock(&vop->reg_lock);
|
||||||
|
VOP_WIN_SET(vop, win, enable, 0);
|
||||||
|
spin_unlock(&vop->reg_lock);
|
||||||
|
}
|
||||||
|
|
||||||
vop_cfg_done(vop);
|
vop_cfg_done(vop);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@@ -566,28 +580,11 @@ err_put_pm_runtime:
 static void vop_crtc_disable(struct drm_crtc *crtc)
 {
 	struct vop *vop = to_vop(crtc);
-	int i;
 
 	WARN_ON(vop->event);
 
 	rockchip_drm_psr_deactivate(&vop->crtc);
 
-	/*
-	 * We need to make sure that all windows are disabled before we
-	 * disable that crtc. Otherwise we might try to scan from a destroyed
-	 * buffer later.
-	 */
-	for (i = 0; i < vop->data->win_size; i++) {
-		struct vop_win *vop_win = &vop->win[i];
-		const struct vop_win_data *win = vop_win->data;
-
-		spin_lock(&vop->reg_lock);
-		VOP_WIN_SET(vop, win, enable, 0);
-		spin_unlock(&vop->reg_lock);
-	}
-
-	vop_cfg_done(vop);
-
 	drm_crtc_vblank_off(crtc);
 
 	/*
@@ -682,8 +679,10 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
 	 * Src.x1 can be odd when do clip, but yuv plane start point
 	 * need align with 2 pixel.
 	 */
-	if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2))
+	if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2)) {
+		DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n");
 		return -EINVAL;
+	}
 
 	return 0;
 }
@@ -764,7 +763,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
 	spin_lock(&vop->reg_lock);
 
 	VOP_WIN_SET(vop, win, format, format);
-	VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2);
+	VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4));
 	VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
 	if (is_yuv_support(fb->format->format)) {
 		int hsub = drm_format_horz_chroma_subsampling(fb->format->format);
@@ -778,7 +777,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
 		offset += (src->y1 >> 16) * fb->pitches[1] / vsub;
 
 		dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];
-		VOP_WIN_SET(vop, win, uv_vir, fb->pitches[1] >> 2);
+		VOP_WIN_SET(vop, win, uv_vir, DIV_ROUND_UP(fb->pitches[1], 4));
 		VOP_WIN_SET(vop, win, uv_mst, dma_addr);
 	}
 
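The yrgb_vir/uv_vir change above swaps a plain right-shift for DIV_ROUND_UP so that a pitch which is not a multiple of 4 rounds up instead of silently truncating. A minimal standalone sketch of the difference; the macro body is reproduced for illustration (in the kernel it comes from <linux/kernel.h>), and the pitch value is a hypothetical one:

#include <stdio.h>

/* Same definition the kernel uses: round n up to the next multiple of d. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int pitch = 1442;	/* hypothetical pitch, not divisible by 4 */

	/* Truncates: 1442 >> 2 == 360, dropping the partial word. */
	printf("shift:        %u\n", pitch >> 2);
	/* Rounds up: (1442 + 3) / 4 == 361, covering the whole line. */
	printf("DIV_ROUND_UP: %u\n", DIV_ROUND_UP(pitch, 4));
	return 0;
}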
@@ -282,6 +282,9 @@ static inline uint16_t scl_get_bili_dn_vskip(int src_h, int dst_h,
 
 	act_height = (src_h + vskiplines - 1) / vskiplines;
 
+	if (act_height == dst_h)
+		return GET_SCL_FT_BILI_DN(src_h, dst_h) / vskiplines;
+
 	return GET_SCL_FT_BILI_DN(act_height, dst_h);
 }
 
@@ -7,7 +7,6 @@ config DRM_STM
 	select DRM_PANEL
 	select VIDEOMODE_HELPERS
 	select FB_PROVIDE_GET_FB_UNMAPPED_AREA
-	default y
 
 	help
 	  Enable support for the on-chip display controller on
@@ -193,7 +193,6 @@ struct bmc150_accel_data {
 	struct regmap *regmap;
 	int irq;
 	struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS];
-	atomic_t active_intr;
 	struct bmc150_accel_trigger triggers[BMC150_ACCEL_TRIGGERS];
 	struct mutex mutex;
 	u8 fifo_mode, watermark;
@@ -493,11 +492,6 @@ static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i,
 		goto out_fix_power_state;
 	}
 
-	if (state)
-		atomic_inc(&data->active_intr);
-	else
-		atomic_dec(&data->active_intr);
-
 	return 0;
 
 out_fix_power_state:
@@ -1710,8 +1704,7 @@ static int bmc150_accel_resume(struct device *dev)
 	struct bmc150_accel_data *data = iio_priv(indio_dev);
 
 	mutex_lock(&data->mutex);
-	if (atomic_read(&data->active_intr))
-		bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
+	bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
 	bmc150_accel_fifo_set_mode(data);
 	mutex_unlock(&data->mutex);
 
@@ -166,6 +166,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
 			.mask_ihl = 0x02,
 			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
 		},
+		.sim = {
+			.addr = 0x23,
+			.value = BIT(0),
+		},
 		.multi_read_bit = true,
 		.bootime = 2,
 	},
@@ -234,6 +238,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
 			.mask_od = 0x40,
 			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
 		},
+		.sim = {
+			.addr = 0x23,
+			.value = BIT(0),
+		},
 		.multi_read_bit = true,
 		.bootime = 2,
 	},
@@ -316,6 +324,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
 				.en_mask = 0x08,
 			},
 		},
+		.sim = {
+			.addr = 0x24,
+			.value = BIT(0),
+		},
 		.multi_read_bit = false,
 		.bootime = 2,
 	},
@@ -379,6 +391,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
 			.mask_int1 = 0x04,
 			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
 		},
+		.sim = {
+			.addr = 0x21,
+			.value = BIT(1),
+		},
 		.multi_read_bit = true,
 		.bootime = 2, /* guess */
 	},
@@ -437,6 +453,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
 			.mask_od = 0x40,
 			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
 		},
+		.sim = {
+			.addr = 0x21,
+			.value = BIT(7),
+		},
 		.multi_read_bit = false,
 		.bootime = 2, /* guess */
 	},
@@ -499,6 +519,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
 			.addr_ihl = 0x22,
 			.mask_ihl = 0x80,
 		},
+		.sim = {
+			.addr = 0x23,
+			.value = BIT(0),
+		},
 		.multi_read_bit = true,
 		.bootime = 2,
 	},
@@ -547,6 +571,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
 			.mask_int1 = 0x04,
 			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
 		},
+		.sim = {
+			.addr = 0x21,
+			.value = BIT(1),
+		},
 		.multi_read_bit = false,
 		.bootime = 2,
 	},
@@ -614,6 +642,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
 			.mask_ihl = 0x02,
 			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
 		},
+		.sim = {
+			.addr = 0x23,
+			.value = BIT(0),
+		},
 		.multi_read_bit = true,
 		.bootime = 2,
 	},
@@ -22,6 +22,7 @@
 
 #include <linux/iio/iio.h>
 #include <linux/iio/driver.h>
+#include <linux/iopoll.h>
 
 #define ASPEED_RESOLUTION_BITS		10
 #define ASPEED_CLOCKS_PER_SAMPLE	12
@@ -38,11 +39,17 @@
 
 #define ASPEED_ENGINE_ENABLE		BIT(0)
 
+#define ASPEED_ADC_CTRL_INIT_RDY	BIT(8)
+
+#define ASPEED_ADC_INIT_POLLING_TIME	500
+#define ASPEED_ADC_INIT_TIMEOUT		500000
+
 struct aspeed_adc_model_data {
 	const char *model_name;
 	unsigned int min_sampling_rate;	// Hz
 	unsigned int max_sampling_rate;	// Hz
 	unsigned int vref_voltage;	// mV
+	bool wait_init_sequence;
 };
 
 struct aspeed_adc_data {
@@ -211,6 +218,24 @@ static int aspeed_adc_probe(struct platform_device *pdev)
 		goto scaler_error;
 	}
 
+	model_data = of_device_get_match_data(&pdev->dev);
+
+	if (model_data->wait_init_sequence) {
+		/* Enable engine in normal mode. */
+		writel(ASPEED_OPERATION_MODE_NORMAL | ASPEED_ENGINE_ENABLE,
+		       data->base + ASPEED_REG_ENGINE_CONTROL);
+
+		/* Wait for initial sequence complete. */
+		ret = readl_poll_timeout(data->base + ASPEED_REG_ENGINE_CONTROL,
+					 adc_engine_control_reg_val,
+					 adc_engine_control_reg_val &
+					 ASPEED_ADC_CTRL_INIT_RDY,
+					 ASPEED_ADC_INIT_POLLING_TIME,
+					 ASPEED_ADC_INIT_TIMEOUT);
+		if (ret)
+			goto scaler_error;
+	}
+
 	/* Start all channels in normal mode. */
 	ret = clk_prepare_enable(data->clk_scaler->clk);
 	if (ret)
@@ -274,6 +299,7 @@ static const struct aspeed_adc_model_data ast2500_model_data = {
 	.vref_voltage = 1800, // mV
 	.min_sampling_rate = 1,
 	.max_sampling_rate = 1000000,
+	.wait_init_sequence = true,
 };
 
 static const struct of_device_id aspeed_adc_matches[] = {
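The probe change above uses readl_poll_timeout() to re-read ENGINE_CONTROL until the INIT_RDY bit appears or 500 ms (500000 us, at a 500 us poll interval) elapse. The same bounded-poll shape, as a hedged userspace sketch with a stubbed register read standing in for readl(); the function names here are illustrative, not the driver's:

#include <stdio.h>
#include <time.h>

/* Stub for readl(): pretend the INIT_RDY bit (bit 8) is already set. */
static unsigned int read_engine_control(void) { return 1u << 8; }

/* Poll until (reg & mask) is set, sleeping sleep_us between reads and
 * giving up after timeout_us. Returns 0 on success, -1 on timeout. */
static int poll_bit(unsigned int mask, long sleep_us, long timeout_us)
{
	struct timespec ts = { 0, sleep_us * 1000 };
	long waited = 0;

	while (!(read_engine_control() & mask)) {
		if (waited >= timeout_us)
			return -1;
		nanosleep(&ts, NULL);
		waited += sleep_us;
	}
	return 0;
}

int main(void)
{
	/* Mirrors the driver's 500 us poll interval / 500000 us timeout. */
	printf("init %s\n", poll_bit(1u << 8, 500, 500000) ? "timed out" : "ready");
	return 0;
}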
@@ -28,6 +28,8 @@
 #include <linux/iio/driver.h>
 
 #define AXP288_ADC_EN_MASK		0xF1
+#define AXP288_ADC_TS_PIN_GPADC		0xF2
+#define AXP288_ADC_TS_PIN_ON		0xF3
 
 enum axp288_adc_id {
 	AXP288_ADC_TS,
@@ -121,6 +123,26 @@ static int axp288_adc_read_channel(int *val, unsigned long address,
 	return IIO_VAL_INT;
 }
 
+static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode,
+			     unsigned long address)
+{
+	int ret;
+
+	/* channels other than GPADC do not need to switch TS pin */
+	if (address != AXP288_GP_ADC_H)
+		return 0;
+
+	ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode);
+	if (ret)
+		return ret;
+
+	/* When switching to the GPADC pin give things some time to settle */
+	if (mode == AXP288_ADC_TS_PIN_GPADC)
+		usleep_range(6000, 10000);
+
+	return 0;
+}
+
 static int axp288_adc_read_raw(struct iio_dev *indio_dev,
 			       struct iio_chan_spec const *chan,
 			       int *val, int *val2, long mask)
@@ -131,7 +153,16 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
 	mutex_lock(&indio_dev->mlock);
 	switch (mask) {
 	case IIO_CHAN_INFO_RAW:
+		if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC,
+				      chan->address)) {
+			dev_err(&indio_dev->dev, "GPADC mode\n");
+			ret = -EINVAL;
+			break;
+		}
 		ret = axp288_adc_read_channel(val, chan->address, info->regmap);
+		if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON,
+				      chan->address))
+			dev_err(&indio_dev->dev, "TS pin restore\n");
 		break;
 	default:
 		ret = -EINVAL;
@@ -141,6 +172,15 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
 	return ret;
 }
 
+static int axp288_adc_set_state(struct regmap *regmap)
+{
+	/* ADC should be always enabled for internal FG to function */
+	if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON))
+		return -EIO;
+
+	return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
+}
+
 static const struct iio_info axp288_adc_iio_info = {
 	.read_raw = &axp288_adc_read_raw,
 	.driver_module = THIS_MODULE,
@@ -169,7 +209,7 @@ static int axp288_adc_probe(struct platform_device *pdev)
 	 * Set ADC to enabled state at all time, including system suspend.
 	 * otherwise internal fuel gauge functionality may be affected.
 	 */
-	ret = regmap_write(info->regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
+	ret = axp288_adc_set_state(axp20x->regmap);
 	if (ret) {
 		dev_err(&pdev->dev, "unable to enable ADC device\n");
 		return ret;
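The axp288 change brackets every GPADC read with a mode switch: select the GPADC input, let the bias settle, sample, then restore the TS-pin current so the fuel gauge keeps working. Reduced to a hedged standalone sketch of that switch/sample/restore shape; the register model here is invented for illustration, only the 0xF2/0xF3 mode values follow the patch:

#include <stdio.h>

static unsigned char ts_pin_ctrl = 0xF3;	/* imaginary register, "TS on" */

static int reg_write(unsigned char val) { ts_pin_ctrl = val; return 0; }
static int sample(void) { return 512; }		/* stand-in for the ADC read */

/* Switch to GPADC mode, sample, then always restore the previous mode. */
static int read_gpadc(int *val)
{
	if (reg_write(0xF2))			/* enter GPADC mode */
		return -1;
	*val = sample();
	if (reg_write(0xF3))			/* restore TS bias current */
		fprintf(stderr, "TS pin restore failed\n");
	return 0;
}

int main(void)
{
	int v;

	if (!read_gpadc(&v))
		printf("raw = %d, ctrl back to 0x%X\n", v, ts_pin_ctrl);
	return 0;
}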
@@ -256,6 +256,7 @@ static int sun4i_gpadc_read(struct iio_dev *indio_dev, int channel, int *val,
 
 err:
 	pm_runtime_put_autosuspend(indio_dev->dev.parent);
+	disable_irq(irq);
 	mutex_unlock(&info->mutex);
 
 	return ret;
@@ -365,7 +366,6 @@ static irqreturn_t sun4i_gpadc_temp_data_irq_handler(int irq, void *dev_id)
 		complete(&info->completion);
 
 out:
-	disable_irq_nosync(info->temp_data_irq);
 	return IRQ_HANDLED;
 }
 
@@ -380,7 +380,6 @@ static irqreturn_t sun4i_gpadc_fifo_data_irq_handler(int irq, void *dev_id)
 		complete(&info->completion);
 
 out:
-	disable_irq_nosync(info->fifo_data_irq);
 	return IRQ_HANDLED;
 }
 
@@ -77,7 +77,7 @@
 #define VF610_ADC_ADSTS_MASK		0x300
 #define VF610_ADC_ADLPC_EN		0x80
 #define VF610_ADC_ADHSC_EN		0x400
-#define VF610_ADC_REFSEL_VALT		0x100
+#define VF610_ADC_REFSEL_VALT		0x800
 #define VF610_ADC_REFSEL_VBG		0x1000
 #define VF610_ADC_ADTRG_HARD		0x2000
 #define VF610_ADC_AVGS_8		0x4000
@@ -550,6 +550,31 @@ out:
 }
 EXPORT_SYMBOL(st_sensors_read_info_raw);
 
+static int st_sensors_init_interface_mode(struct iio_dev *indio_dev,
+			const struct st_sensor_settings *sensor_settings)
+{
+	struct st_sensor_data *sdata = iio_priv(indio_dev);
+	struct device_node *np = sdata->dev->of_node;
+	struct st_sensors_platform_data *pdata;
+
+	pdata = (struct st_sensors_platform_data *)sdata->dev->platform_data;
+	if (((np && of_property_read_bool(np, "spi-3wire")) ||
+	     (pdata && pdata->spi_3wire)) && sensor_settings->sim.addr) {
+		int err;
+
+		err = sdata->tf->write_byte(&sdata->tb, sdata->dev,
+					    sensor_settings->sim.addr,
+					    sensor_settings->sim.value);
+		if (err < 0) {
+			dev_err(&indio_dev->dev,
+				"failed to init interface mode\n");
+			return err;
+		}
+	}
+
+	return 0;
+}
+
 int st_sensors_check_device_support(struct iio_dev *indio_dev,
 			int num_sensors_list,
 			const struct st_sensor_settings *sensor_settings)
@@ -574,6 +599,10 @@ int st_sensors_check_device_support(struct iio_dev *indio_dev,
 		return -ENODEV;
 	}
 
+	err = st_sensors_init_interface_mode(indio_dev, &sensor_settings[i]);
+	if (err < 0)
+		return err;
+
 	if (sensor_settings[i].wai_addr) {
 		err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
 					   sensor_settings[i].wai_addr, &wai);
@@ -626,7 +626,7 @@ static irqreturn_t tsl2563_event_handler(int irq, void *private)
 	struct tsl2563_chip *chip = iio_priv(dev_info);
 
 	iio_push_event(dev_info,
-		       IIO_UNMOD_EVENT_CODE(IIO_LIGHT,
+		       IIO_UNMOD_EVENT_CODE(IIO_INTENSITY,
 					    0,
 					    IIO_EV_TYPE_THRESH,
 					    IIO_EV_DIR_EITHER),
@@ -456,7 +456,7 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
 			.mask_od = 0x40,
 			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
 		},
-		.multi_read_bit = true,
+		.multi_read_bit = false,
 		.bootime = 2,
 	},
 };
@@ -1519,6 +1519,13 @@ static int arm_smmu_add_device(struct device *dev)
 
 	if (using_legacy_binding) {
 		ret = arm_smmu_register_legacy_master(dev, &smmu);
+
+		/*
+		 * If dev->iommu_fwspec is initally NULL, arm_smmu_register_legacy_master()
+		 * will allocate/initialise a new one. Thus we need to update fwspec for
+		 * later use.
+		 */
+		fwspec = dev->iommu_fwspec;
 		if (ret)
 			goto out_free;
 	} else if (fwspec && fwspec->ops == &arm_smmu_ops) {
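The fwspec refresh above matters because arm_smmu_register_legacy_master() may allocate a new dev->iommu_fwspec, leaving the caller's earlier local copy stale. The general hazard, in a small hedged sketch with invented types and names:

#include <stdio.h>
#include <stdlib.h>

struct spec { int ops; };
struct device { struct spec *fwspec; };

/* Callee may allocate a fresh spec and hang it off the device. */
static int register_master(struct device *dev)
{
	if (!dev->fwspec) {
		dev->fwspec = calloc(1, sizeof(*dev->fwspec));
		if (!dev->fwspec)
			return -1;
		dev->fwspec->ops = 42;
	}
	return 0;
}

int main(void)
{
	struct device dev = { NULL };
	struct spec *fwspec = dev.fwspec;	/* cached before the call */

	register_master(&dev);
	fwspec = dev.fwspec;	/* must re-read: the callee replaced it */
	printf("ops = %d\n", fwspec ? fwspec->ops : -1);
	free(dev.fwspec);
	return 0;
}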
@@ -26,7 +26,7 @@
 
 #define FSM_TIMER_DEBUG 0
 
-void
+int
 mISDN_FsmNew(struct Fsm *fsm,
 	     struct FsmNode *fnlist, int fncount)
 {
@@ -34,6 +34,8 @@ mISDN_FsmNew(struct Fsm *fsm,
 
 	fsm->jumpmatrix = kzalloc(sizeof(FSMFNPTR) * fsm->state_count *
 				  fsm->event_count, GFP_KERNEL);
+	if (fsm->jumpmatrix == NULL)
+		return -ENOMEM;
 
 	for (i = 0; i < fncount; i++)
 		if ((fnlist[i].state >= fsm->state_count) ||
@@ -45,6 +47,7 @@ mISDN_FsmNew(struct Fsm *fsm,
 		} else
 			fsm->jumpmatrix[fsm->state_count * fnlist[i].event +
 					fnlist[i].state] = (FSMFNPTR) fnlist[i].routine;
+	return 0;
 }
 EXPORT_SYMBOL(mISDN_FsmNew);
 
@@ -55,7 +55,7 @@ struct FsmTimer {
 	void *arg;
 };
 
-extern void mISDN_FsmNew(struct Fsm *, struct FsmNode *, int);
+extern int mISDN_FsmNew(struct Fsm *, struct FsmNode *, int);
 extern void mISDN_FsmFree(struct Fsm *);
 extern int mISDN_FsmEvent(struct FsmInst *, int , void *);
 extern void mISDN_FsmChangeState(struct FsmInst *, int);
@@ -414,8 +414,7 @@ l1_init(u_int *deb)
 	l1fsm_s.event_count = L1_EVENT_COUNT;
 	l1fsm_s.strEvent = strL1Event;
 	l1fsm_s.strState = strL1SState;
-	mISDN_FsmNew(&l1fsm_s, L1SFnList, ARRAY_SIZE(L1SFnList));
-	return 0;
+	return mISDN_FsmNew(&l1fsm_s, L1SFnList, ARRAY_SIZE(L1SFnList));
 }
 
 void
@@ -2247,15 +2247,26 @@ static struct Bprotocol X75SLP = {
 int
 Isdnl2_Init(u_int *deb)
 {
+	int res;
 	debug = deb;
 	mISDN_register_Bprotocol(&X75SLP);
 	l2fsm.state_count = L2_STATE_COUNT;
 	l2fsm.event_count = L2_EVENT_COUNT;
 	l2fsm.strEvent = strL2Event;
 	l2fsm.strState = strL2State;
-	mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
-	TEIInit(deb);
+	res = mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
+	if (res)
+		goto error;
+	res = TEIInit(deb);
+	if (res)
+		goto error_fsm;
 	return 0;
+
+error_fsm:
+	mISDN_FsmFree(&l2fsm);
+error:
+	mISDN_unregister_Bprotocol(&X75SLP);
+	return res;
 }
 
 void
@@ -1387,23 +1387,37 @@ create_teimanager(struct mISDNdevice *dev)
 
 int TEIInit(u_int *deb)
 {
+	int res;
 	debug = deb;
 	teifsmu.state_count = TEI_STATE_COUNT;
 	teifsmu.event_count = TEI_EVENT_COUNT;
 	teifsmu.strEvent = strTeiEvent;
 	teifsmu.strState = strTeiState;
-	mISDN_FsmNew(&teifsmu, TeiFnListUser, ARRAY_SIZE(TeiFnListUser));
+	res = mISDN_FsmNew(&teifsmu, TeiFnListUser, ARRAY_SIZE(TeiFnListUser));
+	if (res)
+		goto error;
 	teifsmn.state_count = TEI_STATE_COUNT;
 	teifsmn.event_count = TEI_EVENT_COUNT;
 	teifsmn.strEvent = strTeiEvent;
 	teifsmn.strState = strTeiState;
-	mISDN_FsmNew(&teifsmn, TeiFnListNet, ARRAY_SIZE(TeiFnListNet));
+	res = mISDN_FsmNew(&teifsmn, TeiFnListNet, ARRAY_SIZE(TeiFnListNet));
+	if (res)
+		goto error_smn;
 	deactfsm.state_count = DEACT_STATE_COUNT;
 	deactfsm.event_count = DEACT_EVENT_COUNT;
 	deactfsm.strEvent = strDeactEvent;
 	deactfsm.strState = strDeactState;
-	mISDN_FsmNew(&deactfsm, DeactFnList, ARRAY_SIZE(DeactFnList));
+	res = mISDN_FsmNew(&deactfsm, DeactFnList, ARRAY_SIZE(DeactFnList));
+	if (res)
+		goto error_deact;
 	return 0;
+
+error_deact:
+	mISDN_FsmFree(&teifsmn);
+error_smn:
+	mISDN_FsmFree(&teifsmu);
+error:
+	return res;
 }
 
 void TEIFree(void)
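The TEIInit() rework is the classic staged-initialisation pattern: each step that can fail jumps to a label that unwinds only the steps already completed, in reverse order. A compact standalone sketch of the shape; the step functions are placeholders, not mISDN code:

#include <stdio.h>

static int setup_a(void) { return 0; }	/* each returns 0 on success */
static int setup_b(void) { return 0; }
static int setup_c(void) { return -1; }	/* pretend the last step fails */
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int init_all(void)
{
	int res;

	res = setup_a();
	if (res)
		goto error;
	res = setup_b();
	if (res)
		goto error_a;
	res = setup_c();
	if (res)
		goto error_b;
	return 0;

error_b:			/* unwind in reverse order of setup */
	undo_b();
error_a:
	undo_a();
error:
	return res;
}

int main(void)
{
	printf("init_all -> %d\n", init_all());
	return 0;
}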
@@ -7996,7 +7996,7 @@ bool md_write_start(struct mddev *mddev, struct bio *bi)
 	if (mddev->safemode == 1)
 		mddev->safemode = 0;
 	/* sync_checkers is always 0 when writes_pending is in per-cpu mode */
-	if (mddev->in_sync || !mddev->sync_checkers) {
+	if (mddev->in_sync || mddev->sync_checkers) {
 		spin_lock(&mddev->lock);
 		if (mddev->in_sync) {
 			mddev->in_sync = 0;
@@ -8656,6 +8656,9 @@ void md_check_recovery(struct mddev *mddev)
 	if (mddev_trylock(mddev)) {
 		int spares = 0;
 
+		if (!mddev->external && mddev->safemode == 1)
+			mddev->safemode = 0;
+
 		if (mddev->ro) {
 			struct md_rdev *rdev;
 			if (!mddev->external && mddev->in_sync)
@@ -236,9 +236,10 @@ struct r5l_io_unit {
 	bool need_split_bio;
 	struct bio *split_bio;
 
 	unsigned int has_flush:1;		/* include flush request */
 	unsigned int has_fua:1;			/* include fua request */
-	unsigned int has_null_flush:1;		/* include empty flush request */
+	unsigned int has_null_flush:1;		/* include null flush request */
+	unsigned int has_flush_payload:1;	/* include flush payload */
 	/*
 	 * io isn't sent yet, flush/fua request can only be submitted till it's
 	 * the first IO in running_ios list
@@ -571,6 +572,8 @@ static void r5l_log_endio(struct bio *bio)
 	struct r5l_io_unit *io_deferred;
 	struct r5l_log *log = io->log;
 	unsigned long flags;
+	bool has_null_flush;
+	bool has_flush_payload;
 
 	if (bio->bi_status)
 		md_error(log->rdev->mddev, log->rdev);
@@ -580,6 +583,16 @@ static void r5l_log_endio(struct bio *bio)
 
 	spin_lock_irqsave(&log->io_list_lock, flags);
 	__r5l_set_io_unit_state(io, IO_UNIT_IO_END);
+
+	/*
+	 * if the io doesn't not have null_flush or flush payload,
+	 * it is not safe to access it after releasing io_list_lock.
+	 * Therefore, it is necessary to check the condition with
+	 * the lock held.
+	 */
+	has_null_flush = io->has_null_flush;
+	has_flush_payload = io->has_flush_payload;
+
 	if (log->need_cache_flush && !list_empty(&io->stripe_list))
 		r5l_move_to_end_ios(log);
 	else
@@ -600,19 +613,23 @@ static void r5l_log_endio(struct bio *bio)
 	if (log->need_cache_flush)
 		md_wakeup_thread(log->rdev->mddev->thread);
 
-	if (io->has_null_flush) {
+	/* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */
+	if (has_null_flush) {
 		struct bio *bi;
 
 		WARN_ON(bio_list_empty(&io->flush_barriers));
 		while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) {
 			bio_endio(bi);
-			atomic_dec(&io->pending_stripe);
+			if (atomic_dec_and_test(&io->pending_stripe)) {
+				__r5l_stripe_write_finished(io);
+				return;
+			}
 		}
 	}
-
-	/* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */
-	if (atomic_read(&io->pending_stripe) == 0)
+	/* decrease pending_stripe for flush payload */
+	if (has_flush_payload)
+		if (atomic_dec_and_test(&io->pending_stripe))
 			__r5l_stripe_write_finished(io);
 }
 
 static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
@@ -881,6 +898,11 @@ static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect)
 	payload->size = cpu_to_le32(sizeof(__le64));
 	payload->flush_stripes[0] = cpu_to_le64(sect);
 	io->meta_offset += meta_size;
+	/* multiple flush payloads count as one pending_stripe */
+	if (!io->has_flush_payload) {
+		io->has_flush_payload = 1;
+		atomic_inc(&io->pending_stripe);
+	}
 	mutex_unlock(&log->io_mutex);
 }
 
@@ -2540,23 +2562,32 @@ static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
  */
 int r5c_journal_mode_set(struct mddev *mddev, int mode)
 {
-	struct r5conf *conf = mddev->private;
-	struct r5l_log *log = conf->log;
-
-	if (!log)
-		return -ENODEV;
+	struct r5conf *conf;
+	int err;
 
 	if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH ||
 	    mode > R5C_JOURNAL_MODE_WRITE_BACK)
 		return -EINVAL;
 
+	err = mddev_lock(mddev);
+	if (err)
+		return err;
+	conf = mddev->private;
+	if (!conf || !conf->log) {
+		mddev_unlock(mddev);
+		return -ENODEV;
+	}
+
 	if (raid5_calc_degraded(conf) > 0 &&
-	    mode == R5C_JOURNAL_MODE_WRITE_BACK)
+	    mode == R5C_JOURNAL_MODE_WRITE_BACK) {
+		mddev_unlock(mddev);
 		return -EINVAL;
+	}
 
 	mddev_suspend(mddev);
 	conf->log->r5c_journal_mode = mode;
 	mddev_resume(mddev);
+	mddev_unlock(mddev);
 
 	pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
 		 mdname(mddev), mode, r5c_journal_mode_str[mode]);
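The r5l_log_endio() fix copies has_null_flush and has_flush_payload into locals while io_list_lock is still held, because once the last pending_stripe reference drops, the io unit may be freed by another CPU. The copy-under-lock idea in a hedged pthread sketch (build with -lpthread); the object model is invented:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct io_unit {
	pthread_mutex_t lock;
	int has_flush;	/* flag that must be read before the object can die */
};

static void endio(struct io_unit *io)
{
	int has_flush;

	pthread_mutex_lock(&io->lock);
	has_flush = io->has_flush;	/* snapshot while access is safe */
	pthread_mutex_unlock(&io->lock);

	/* From here on, io may be freed by whoever drops the last
	 * reference, so only the local copy is consulted. */
	if (has_flush)
		puts("completing flush waiters from the snapshot");
}

int main(void)
{
	struct io_unit *io = malloc(sizeof(*io));

	pthread_mutex_init(&io->lock, NULL);
	io->has_flush = 1;
	endio(io);
	free(io);
	return 0;
}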
@@ -215,6 +215,12 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	pci_set_drvdata(pdev, dev);
 
+	/*
+	 * MEI requires to resume from runtime suspend mode
+	 * in order to perform link reset flow upon system suspend.
+	 */
+	pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
+
 	/*
 	 * For not wake-able HW runtime pm framework
 	 * can't be used on pci device level.
@@ -137,6 +137,12 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	pci_set_drvdata(pdev, dev);
 
+	/*
+	 * MEI requires to resume from runtime suspend mode
+	 * in order to perform link reset flow upon system suspend.
+	 */
+	pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
+
 	/*
 	 * For not wake-able HW runtime pm framework
 	 * can't be used on pci device level.
@@ -2170,7 +2170,9 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
 	 * from being accepted.
 	 */
 	card = md->queue.card;
+	spin_lock_irq(md->queue.queue->queue_lock);
 	queue_flag_set(QUEUE_FLAG_BYPASS, md->queue.queue);
+	spin_unlock_irq(md->queue.queue->queue_lock);
 	blk_set_queue_dying(md->queue.queue);
 	mmc_cleanup_queue(&md->queue);
 	if (md->disk->flags & GENHD_FL_UP) {
@@ -1289,7 +1289,7 @@ out_err:
 static int mmc_select_hs400es(struct mmc_card *card)
 {
 	struct mmc_host *host = card->host;
-	int err = 0;
+	int err = -EINVAL;
 	u8 val;
 
 	if (!(host->caps & MMC_CAP_8_BIT_DATA)) {
@@ -2086,7 +2086,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
 	mmc->max_seg_size = mmc->max_req_size;
 
 	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
-		     MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE;
+		     MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | MMC_CAP_CMD23;
 
 	mmc->caps |= mmc_pdata(host)->caps;
 	if (mmc->caps & MMC_CAP_8_BIT_DATA)
@@ -113,6 +113,7 @@ static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
 		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
 			if (tr->writesect(dev, block, buf))
 				return BLK_STS_IOERR;
+		return BLK_STS_OK;
 	default:
 		return BLK_STS_IOERR;
 	}
@@ -1569,7 +1569,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 	new_slave->delay = 0;
 	new_slave->link_failure_count = 0;
 
-	if (bond_update_speed_duplex(new_slave))
+	if (bond_update_speed_duplex(new_slave) &&
+	    bond_needs_speed_duplex(bond))
 		new_slave->link = BOND_LINK_DOWN;
 
 	new_slave->last_rx = jiffies -
@@ -2140,11 +2141,13 @@ static void bond_miimon_commit(struct bonding *bond)
 			continue;
 
 		case BOND_LINK_UP:
-			if (bond_update_speed_duplex(slave)) {
+			if (bond_update_speed_duplex(slave) &&
+			    bond_needs_speed_duplex(bond)) {
 				slave->link = BOND_LINK_DOWN;
-				netdev_warn(bond->dev,
-					    "failed to get link speed/duplex for %s\n",
-					    slave->dev->name);
+				if (net_ratelimit())
+					netdev_warn(bond->dev,
+						    "failed to get link speed/duplex for %s\n",
+						    slave->dev->name);
 				continue;
 			}
 			bond_set_slave_link_state(slave, BOND_LINK_UP,
@@ -531,6 +531,7 @@ enum {                                 /* adapter flags */
 	USING_SOFT_PARAMS  = (1 << 6),
 	MASTER_PF          = (1 << 7),
 	FW_OFLD_CONN       = (1 << 9),
+	ROOT_NO_RELAXED_ORDERING = (1 << 10),
 };
 
 enum {
@@ -4610,11 +4610,6 @@ static void print_port_info(const struct net_device *dev)
 		    dev->name, adap->params.vpd.id, adap->name, buf);
 }
 
-static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
-{
-	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
-}
-
 /*
  * Free the following resources:
  * - memory used for tables
@@ -4864,7 +4859,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 
 	pci_enable_pcie_error_reporting(pdev);
-	enable_pcie_relaxed_ordering(pdev);
 	pci_set_master(pdev);
 	pci_save_state(pdev);
 
@@ -4903,6 +4897,23 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	adapter->msg_enable = DFLT_MSG_ENABLE;
 	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
 
+	/* If possible, we use PCIe Relaxed Ordering Attribute to deliver
+	 * Ingress Packet Data to Free List Buffers in order to allow for
+	 * chipset performance optimizations between the Root Complex and
+	 * Memory Controllers. (Messages to the associated Ingress Queue
+	 * notifying new Packet Placement in the Free Lists Buffers will be
+	 * send without the Relaxed Ordering Attribute thus guaranteeing that
+	 * all preceding PCIe Transaction Layer Packets will be processed
+	 * first.) But some Root Complexes have various issues with Upstream
+	 * Transaction Layer Packets with the Relaxed Ordering Attribute set.
+	 * The PCIe devices which under the Root Complexes will be cleared the
+	 * Relaxed Ordering bit in the configuration space, So we check our
+	 * PCIe configuration space to see if it's flagged with advice against
+	 * using Relaxed Ordering.
+	 */
+	if (!pcie_relaxed_ordering_enabled(pdev))
+		adapter->flags |= ROOT_NO_RELAXED_ORDERING;
+
 	spin_lock_init(&adapter->stats_lock);
 	spin_lock_init(&adapter->tid_release_lock);
 	spin_lock_init(&adapter->win0_lock);
@@ -2719,6 +2719,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 	struct fw_iq_cmd c;
 	struct sge *s = &adap->sge;
 	struct port_info *pi = netdev_priv(dev);
+	int relaxed = !(adap->flags & ROOT_NO_RELAXED_ORDERING);
 
 	/* Size needs to be multiple of 16, including status entry. */
 	iq->size = roundup(iq->size, 16);
@@ -2772,8 +2773,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 
 		flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
 		c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F |
-					     FW_IQ_CMD_FL0FETCHRO_F |
-					     FW_IQ_CMD_FL0DATARO_F |
+					     FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
+					     FW_IQ_CMD_FL0DATARO_V(relaxed) |
 					     FW_IQ_CMD_FL0PADEN_F);
 		if (cong >= 0)
 			c.iqns_to_fl0congen |=
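The cxgb4 pattern here is: probe once, record the platform quirk as an adapter flag, then let every queue-allocation site derive its behaviour from that flag instead of re-probing the bus. A trivial standalone sketch of the gating step; the flag value and name follow the patch, the rest is illustrative:

#include <stdio.h>

#define ROOT_NO_RELAXED_ORDERING (1 << 10)	/* set once at probe time */

/* Yields 0 or 1, fed into the firmware fields FL0FETCHRO/FL0DATARO. */
static int relaxed_ordering_ok(unsigned int adapter_flags)
{
	return !(adapter_flags & ROOT_NO_RELAXED_ORDERING);
}

int main(void)
{
	unsigned int flags = ROOT_NO_RELAXED_ORDERING;	/* quirky root complex */

	printf("use relaxed ordering: %d\n", relaxed_ordering_ok(flags));
	printf("use relaxed ordering: %d\n", relaxed_ordering_ok(0));
	return 0;
}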
@@ -408,6 +408,7 @@ enum {                             /* adapter flags */
 	USING_MSI          = (1UL << 1),
 	USING_MSIX         = (1UL << 2),
 	QUEUES_BOUND       = (1UL << 3),
+	ROOT_NO_RELAXED_ORDERING = (1UL << 4),
 };
 
 /*