commit 55cfcd1211
Merge branch 'open_state'

@@ -27,5 +27,11 @@ You have to add the following kernel parameters in your elilo.conf:
 Macbook Pro 17", iMac 20" :
 	video=efifb:i20
 
+Accepted options:
+
+nowc	Don't map the framebuffer write combined. This can be used
+	to workaround side-effects and slowdowns on other CPU cores
+	when large amounts of console data are written.
+
 --
 Edgar Hucek <gimli@dark-green.com>

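Note on the hunk above: framebuffer drivers generally take comma-separated options on the video= parameter (treat that as an assumption for efifb), so the new nowc flag should combine with a machine mode option, e.g. in elilo.conf:

    append="video=efifb:i20,nowc"
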
@@ -459,7 +459,7 @@ pin controller?
 
 This is done by registering "ranges" of pins, which are essentially
 cross-reference tables. These are described in
-Documentation/pinctrl.txt
+Documentation/driver-api/pinctl.rst
 
 While the pin allocation is totally managed by the pinctrl subsystem,
 gpio (under gpiolib) is still maintained by gpio drivers. It may happen

MAINTAINERS

@@ -1161,7 +1161,7 @@ M:	Brendan Higgins <brendanhiggins@google.com>
 R:	Benjamin Herrenschmidt <benh@kernel.crashing.org>
 R:	Joel Stanley <joel@jms.id.au>
 L:	linux-i2c@vger.kernel.org
-L:	openbmc@lists.ozlabs.org
+L:	openbmc@lists.ozlabs.org (moderated for non-subscribers)
 S:	Maintained
 F:	drivers/irqchip/irq-aspeed-i2c-ic.c
 F:	drivers/i2c/busses/i2c-aspeed.c
@@ -5834,7 +5834,7 @@ F:	drivers/staging/greybus/spi.c
 F:	drivers/staging/greybus/spilib.c
 F:	drivers/staging/greybus/spilib.h
 
-GREYBUS LOOBACK/TIME PROTOCOLS DRIVERS
+GREYBUS LOOPBACK/TIME PROTOCOLS DRIVERS
 M:	Bryan O'Donoghue <pure.logic@nexus-software.ie>
 S:	Maintained
 F:	drivers/staging/greybus/loopback.c
@@ -10383,7 +10383,7 @@ L:	linux-gpio@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git
 S:	Maintained
 F:	Documentation/devicetree/bindings/pinctrl/
-F:	Documentation/pinctrl.txt
+F:	Documentation/driver-api/pinctl.rst
 F:	drivers/pinctrl/
 F:	include/linux/pinctrl/
 
@@ -14004,6 +14004,7 @@ F:	drivers/block/virtio_blk.c
 F:	include/linux/virtio*.h
 F:	include/uapi/linux/virtio_*.h
+F:	drivers/crypto/virtio/
 F:	mm/balloon_compaction.c
 
 VIRTIO CRYPTO DRIVER
 M:	Gonglei <arei.gonglei@huawei.com>

Makefile

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 13
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*

@@ -148,7 +148,8 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+		    unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 	tlb->fullmm = !(start | (end+1));
@@ -166,8 +167,14 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
 }
 
 static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+		    unsigned long start, unsigned long end, bool force)
 {
+	if (force) {
+		tlb->range_start = start;
+		tlb->range_end = end;
+	}
+
 	tlb_flush_mmu(tlb);
 
 	/* keep the page table cache within bounds */

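Note: the hunks above (and the matching ia64/s390/sh/um hunks below) only rename the arch hooks and add a force parameter; the callers live in generic mm code that is not part of this excerpt. A hedged sketch of what the common wrappers in this series look like (mm_tlb_flush_nested() is assumed from the same patch set, not shown in this diff):

    void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                        unsigned long start, unsigned long end)
    {
            arch_tlb_gather_mmu(tlb, mm, start, end);
    }

    void tlb_finish_mmu(struct mmu_gather *tlb,
                        unsigned long start, unsigned long end)
    {
            /* force a flush when another thread raced on the same range */
            bool force = mm_tlb_flush_nested(tlb->mm);

            arch_tlb_finish_mmu(tlb, start, end, force);
    }
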
@@ -168,7 +168,8 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+		    unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 	tlb->max = ARRAY_SIZE(tlb->local);
@@ -185,8 +186,11 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
  * collected.
  */
 static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+		    unsigned long start, unsigned long end, bool force)
 {
+	if (force)
+		tlb->need_flush = 1;
 	/*
 	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
 	 * tlb->end_addr.

@@ -2260,7 +2260,7 @@ config CPU_R4K_CACHE_TLB
 
 config MIPS_MT_SMP
 	bool "MIPS MT SMP support (1 TC on each available VPE)"
-	depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MIPSR6
+	depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MIPSR6 && !CPU_MICROMIPS
 	select CPU_MIPSR2_IRQ_VI
 	select CPU_MIPSR2_IRQ_EI
 	select SYNC_R4K

@@ -243,8 +243,21 @@ include arch/mips/Kbuild.platforms
 ifdef CONFIG_PHYSICAL_START
 load-y					= $(CONFIG_PHYSICAL_START)
 endif
-entry-y				= 0x$(shell $(NM) vmlinux 2>/dev/null \
+
+entry-noisa-y				= 0x$(shell $(NM) vmlinux 2>/dev/null \
 			| grep "\bkernel_entry\b" | cut -f1 -d \ )
+ifdef CONFIG_CPU_MICROMIPS
+#
+# Set the ISA bit, since the kernel_entry symbol in the ELF will have it
+# clear which would lead to images containing addresses which bootloaders may
+# jump to as MIPS32 code.
+#
+entry-y	= $(patsubst %0,%1,$(patsubst %2,%3,$(patsubst %4,%5, \
+	    $(patsubst %6,%7,$(patsubst %8,%9,$(patsubst %a,%b, \
+	    $(patsubst %c,%d,$(patsubst %e,%f,$(entry-noisa-y)))))))))
+else
+entry-y	= $(entry-noisa-y)
+endif
 
 cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic
 drivers-$(CONFIG_PCI)	+= arch/mips/pci/

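Note: the patsubst chain above rewrites the final hex digit of the link address from even to odd, i.e. it sets bit 0, the microMIPS ISA bit. The same arithmetic as a stand-alone C illustration (the address is hypothetical):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long noisa = 0xffffffff80100400ull; /* kernel_entry from nm */
            unsigned long long entry = noisa | 1;             /* 0->1, 2->3, ..., e->f */

            printf("%#llx\n", entry);                         /* 0xffffffff80100401 */
            return 0;
    }
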
@@ -0,0 +1,2 @@
+ashldi3.c
+bswapsi.c

@@ -13,9 +13,9 @@
 #include <linux/mutex.h>
 #include <linux/delay.h>
 #include <linux/of_platform.h>
+#include <linux/io.h>
 
 #include <asm/octeon/octeon.h>
-#include <asm/octeon/cvmx-gpio-defs.h>
 
 /* USB Control Register */
 union cvm_usbdrd_uctl_ctl {

@@ -147,23 +147,12 @@
 		 * Find irq with highest priority
 		 */
 		# open coded PTR_LA t1, cpu_mask_nr_tbl
-#if (_MIPS_SZPTR == 32)
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
 		# open coded la t1, cpu_mask_nr_tbl
 		lui	t1, %hi(cpu_mask_nr_tbl)
 		addiu	t1, %lo(cpu_mask_nr_tbl)
-
-#endif
-#if (_MIPS_SZPTR == 64)
-		# open coded dla t1, cpu_mask_nr_tbl
-		.set	push
-		.set	noat
-		lui	t1, %highest(cpu_mask_nr_tbl)
-		lui	AT, %hi(cpu_mask_nr_tbl)
-		daddiu	t1, t1, %higher(cpu_mask_nr_tbl)
-		daddiu	AT, AT, %lo(cpu_mask_nr_tbl)
-		dsll	t1, 32
-		daddu	t1, t1, AT
-		.set	pop
+#else
+#error GCC `-msym32' option required for 64-bit DECstation builds
 #endif
 1:		lw	t2,(t1)
 		nop
@@ -214,23 +203,12 @@
 		 * Find irq with highest priority
 		 */
 		# open coded PTR_LA t1,asic_mask_nr_tbl
-#if (_MIPS_SZPTR == 32)
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
 		# open coded la t1, asic_mask_nr_tbl
 		lui	t1, %hi(asic_mask_nr_tbl)
 		addiu	t1, %lo(asic_mask_nr_tbl)
-
-#endif
-#if (_MIPS_SZPTR == 64)
-		# open coded dla t1, asic_mask_nr_tbl
-		.set	push
-		.set	noat
-		lui	t1, %highest(asic_mask_nr_tbl)
-		lui	AT, %hi(asic_mask_nr_tbl)
-		daddiu	t1, t1, %higher(asic_mask_nr_tbl)
-		daddiu	AT, AT, %lo(asic_mask_nr_tbl)
-		dsll	t1, 32
-		daddu	t1, t1, AT
-		.set	pop
+#else
+#error GCC `-msym32' option required for 64-bit DECstation builds
 #endif
 2:		lw	t2,(t1)
 		nop

@@ -9,6 +9,8 @@
 #ifndef _ASM_CACHE_H
 #define _ASM_CACHE_H
 
+#include <kmalloc.h>
+
 #define L1_CACHE_SHIFT		CONFIG_MIPS_L1_CACHE_SHIFT
 #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
 

@@ -428,6 +428,9 @@
 #ifndef cpu_scache_line_size
 #define cpu_scache_line_size()	cpu_data[0].scache.linesz
 #endif
+#ifndef cpu_tcache_line_size
+#define cpu_tcache_line_size()	cpu_data[0].tcache.linesz
+#endif
 
 #ifndef cpu_hwrena_impl_bits
 #define cpu_hwrena_impl_bits	0

@@ -33,6 +33,10 @@
 #define CVMX_L2C_DBG (CVMX_ADD_IO_SEG(0x0001180080000030ull))
 #define CVMX_L2C_CFG (CVMX_ADD_IO_SEG(0x0001180080000000ull))
 #define CVMX_L2C_CTL (CVMX_ADD_IO_SEG(0x0001180080800000ull))
+#define CVMX_L2C_ERR_TDTX(block_id)					       \
+	(CVMX_ADD_IO_SEG(0x0001180080A007E0ull) + ((block_id) & 3) * 0x40000ull)
+#define CVMX_L2C_ERR_TTGX(block_id)					       \
+	(CVMX_ADD_IO_SEG(0x0001180080A007E8ull) + ((block_id) & 3) * 0x40000ull)
 #define CVMX_L2C_LCKBASE (CVMX_ADD_IO_SEG(0x0001180080000058ull))
 #define CVMX_L2C_LCKOFF (CVMX_ADD_IO_SEG(0x0001180080000060ull))
 #define CVMX_L2C_PFCTL (CVMX_ADD_IO_SEG(0x0001180080000090ull))
@@ -66,9 +70,40 @@
 						((offset) & 1) * 8)
 #define CVMX_L2C_WPAR_PPX(offset) (CVMX_ADD_IO_SEG(0x0001180080840000ull) +    \
 						((offset) & 31) * 8)
-#define CVMX_L2D_FUS3	     (CVMX_ADD_IO_SEG(0x00011800800007B8ull))
 
 
+union cvmx_l2c_err_tdtx {
+	uint64_t u64;
+	struct cvmx_l2c_err_tdtx_s {
+		__BITFIELD_FIELD(uint64_t dbe:1,
+		__BITFIELD_FIELD(uint64_t sbe:1,
+		__BITFIELD_FIELD(uint64_t vdbe:1,
+		__BITFIELD_FIELD(uint64_t vsbe:1,
+		__BITFIELD_FIELD(uint64_t syn:10,
+		__BITFIELD_FIELD(uint64_t reserved_22_49:28,
+		__BITFIELD_FIELD(uint64_t wayidx:18,
+		__BITFIELD_FIELD(uint64_t reserved_2_3:2,
+		__BITFIELD_FIELD(uint64_t type:2,
+		;)))))))))
+	} s;
+};
+
+union cvmx_l2c_err_ttgx {
+	uint64_t u64;
+	struct cvmx_l2c_err_ttgx_s {
+		__BITFIELD_FIELD(uint64_t dbe:1,
+		__BITFIELD_FIELD(uint64_t sbe:1,
+		__BITFIELD_FIELD(uint64_t noway:1,
+		__BITFIELD_FIELD(uint64_t reserved_56_60:5,
+		__BITFIELD_FIELD(uint64_t syn:6,
+		__BITFIELD_FIELD(uint64_t reserved_22_49:28,
+		__BITFIELD_FIELD(uint64_t wayidx:15,
+		__BITFIELD_FIELD(uint64_t reserved_2_6:5,
+		__BITFIELD_FIELD(uint64_t type:2,
+		;)))))))))
+	} s;
+};
+
 union cvmx_l2c_cfg {
 	uint64_t u64;
 	struct cvmx_l2c_cfg_s {

@@ -0,0 +1,60 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_L2D_DEFS_H__
+#define __CVMX_L2D_DEFS_H__
+
+#define CVMX_L2D_ERR	(CVMX_ADD_IO_SEG(0x0001180080000010ull))
+#define CVMX_L2D_FUS3	(CVMX_ADD_IO_SEG(0x00011800800007B8ull))
+
+
+union cvmx_l2d_err {
+	uint64_t u64;
+	struct cvmx_l2d_err_s {
+		__BITFIELD_FIELD(uint64_t reserved_6_63:58,
+		__BITFIELD_FIELD(uint64_t bmhclsel:1,
+		__BITFIELD_FIELD(uint64_t ded_err:1,
+		__BITFIELD_FIELD(uint64_t sec_err:1,
+		__BITFIELD_FIELD(uint64_t ded_intena:1,
+		__BITFIELD_FIELD(uint64_t sec_intena:1,
+		__BITFIELD_FIELD(uint64_t ecc_ena:1,
+		;)))))))
+	} s;
+};
+
+union cvmx_l2d_fus3 {
+	uint64_t u64;
+	struct cvmx_l2d_fus3_s {
+		__BITFIELD_FIELD(uint64_t reserved_40_63:24,
+		__BITFIELD_FIELD(uint64_t ema_ctl:3,
+		__BITFIELD_FIELD(uint64_t reserved_34_36:3,
+		__BITFIELD_FIELD(uint64_t q3fus:34,
+		;))))
+	} s;
+};
+
+#endif

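Note: the __BITFIELD_FIELD() macro used by the unions above declares hardware register bitfields in an endian-safe order; to the best of my recollection (treat the exact spelling as an assumption) its MIPS definition is essentially:

    /* <asm/bitfield.h>, paraphrased */
    #if defined(__MIPSEB__)
    #define __BITFIELD_FIELD(field, more)	field; more
    #elif defined(__MIPSEL__)
    #define __BITFIELD_FIELD(field, more)	more field;
    #endif

so each nested invocation lists the most significant field first on big-endian and reverses the order on little-endian.
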
@@ -62,6 +62,7 @@ enum cvmx_mips_space {
 #include <asm/octeon/cvmx-iob-defs.h>
 #include <asm/octeon/cvmx-ipd-defs.h>
 #include <asm/octeon/cvmx-l2c-defs.h>
+#include <asm/octeon/cvmx-l2d-defs.h>
 #include <asm/octeon/cvmx-l2t-defs.h>
 #include <asm/octeon/cvmx-led-defs.h>
 #include <asm/octeon/cvmx-mio-defs.h>

@@ -376,9 +376,6 @@ asmlinkage void start_secondary(void)
 	cpumask_set_cpu(cpu, &cpu_coherent_mask);
 	notify_cpu_starting(cpu);
 
-	complete(&cpu_running);
-	synchronise_count_slave(cpu);
-
 	set_cpu_online(cpu, true);
 
 	set_cpu_sibling_map(cpu);
@@ -386,6 +383,9 @@ asmlinkage void start_secondary(void)
 
 	calculate_cpu_foreign_map();
 
+	complete(&cpu_running);
+	synchronise_count_slave(cpu);
+
 	/*
 	 * irq will be enabled in ->smp_finish(), enabling it too early
 	 * is dangerous.

@@ -48,7 +48,7 @@
 
 #include "uasm.c"
 
-static const struct insn const insn_table[insn_invalid] = {
+static const struct insn insn_table[insn_invalid] = {
 	[insn_addiu]	= {M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
 	[insn_addu]	= {M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD},
 	[insn_and]	= {M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD},

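Note: the removed line repeated the const qualifier at the same level of the declaration, which newer GCC flags with -Wduplicate-decl-specifier; the array elements were already const. Minimal stand-alone illustration (hypothetical struct):

    struct insn { unsigned int match, fields; };

    static const struct insn table[2];            /* what was meant */
    /* static const struct insn const table[2];      duplicate 'const', warns */
    static const struct insn *const tp = table;   /* a const pointer is spelled here */
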
(File diff suppressed because it is too large.)

@@ -28,16 +28,15 @@ EXPORT_SYMBOL(PCIBIOS_MIN_MEM);
 
 static int __init pcibios_set_cache_line_size(void)
 {
-	struct cpuinfo_mips *c = &current_cpu_data;
 	unsigned int lsize;
 
 	/*
 	 * Set PCI cacheline size to that of the highest level in the
 	 * cache hierarchy.
 	 */
-	lsize = c->dcache.linesz;
-	lsize = c->scache.linesz ? : lsize;
-	lsize = c->tcache.linesz ? : lsize;
+	lsize = cpu_dcache_line_size();
+	lsize = cpu_scache_line_size() ? : lsize;
+	lsize = cpu_tcache_line_size() ? : lsize;
 
 	BUG_ON(!lsize);
 

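Note: the x ? : y form above is the GNU "elvis" conditional: it reuses the first operand as the result when it is non-zero, evaluating it only once. Equivalent plain C:

    unsigned int pick(unsigned int preferred, unsigned int fallback)
    {
            return preferred ? preferred : fallback;   /* == preferred ?: fallback */
    }
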
@@ -35,7 +35,8 @@ static __always_inline long gettimeofday_fallback(struct timeval *_tv,
 	"       syscall\n"
 	: "=r" (ret), "=r" (error)
 	: "r" (tv), "r" (tz), "r" (nr)
-	: "memory");
+	: "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
+	  "$14", "$15", "$24", "$25", "hi", "lo", "memory");
 
 	return error ? -ret : ret;
 }
@@ -55,7 +56,8 @@ static __always_inline long clock_gettime_fallback(clockid_t _clkid,
 	"       syscall\n"
 	: "=r" (ret), "=r" (error)
 	: "r" (clkid), "r" (ts), "r" (nr)
-	: "memory");
+	: "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
+	  "$14", "$15", "$24", "$25", "hi", "lo", "memory");
 
 	return error ? -ret : ret;
 }

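Note: the widened clobber lists reflect that the MIPS kernel does not preserve caller-saved registers across syscall, so "memory" alone lets the compiler keep live values in $8-$15, $24, $25, hi and lo. A trimmed, hypothetical sketch of the pattern (constraints illustrative, not the file's exact code):

    static inline long raw_syscall0(long nr)
    {
            register long v0 asm("$2") = nr;    /* syscall number in, result out */

            asm volatile("syscall"
                         : "+r" (v0)
                         :
                         : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
                           "$14", "$15", "$24", "$25", "hi", "lo", "memory");
            return v0;
    }
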
@@ -293,7 +293,8 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_STACK_USAGE=y
 CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_HARDLOCKUP_DETECTOR=y
 CONFIG_LATENCYTOP=y
 CONFIG_SCHED_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
@@ -324,7 +324,8 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_STACK_USAGE=y
 CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_HARDLOCKUP_DETECTOR=y
 CONFIG_DEBUG_MUTEXES=y
 CONFIG_LATENCYTOP=y
 CONFIG_SCHED_TRACER=y
@@ -291,7 +291,8 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_STACK_USAGE=y
 CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_HARDLOCKUP_DETECTOR=y
 CONFIG_LATENCYTOP=y
 CONFIG_SCHED_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y

@@ -223,17 +223,27 @@ system_call_exit:
 	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
 	bne-	.Lsyscall_exit_work
 
-	/* If MSR_FP and MSR_VEC are set in user msr, then no need to restore */
-	li	r7,MSR_FP
+	andi.	r0,r8,MSR_FP
+	beq	2f
 #ifdef CONFIG_ALTIVEC
-	oris	r7,r7,MSR_VEC@h
+	andis.	r0,r8,MSR_VEC@h
+	bne	3f
 #endif
-	and	r0,r8,r7
-	cmpd	r0,r7
-	bne	.Lsyscall_restore_math
-.Lsyscall_restore_math_cont:
+2:	addi	r3,r1,STACK_FRAME_OVERHEAD
+#ifdef CONFIG_PPC_BOOK3S
+	li	r10,MSR_RI
+	mtmsrd	r10,1		/* Restore RI */
+#endif
+	bl	restore_math
+#ifdef CONFIG_PPC_BOOK3S
+	li	r11,0
+	mtmsrd	r11,1
+#endif
+	ld	r8,_MSR(r1)
+	ld	r3,RESULT(r1)
+	li	r11,-MAX_ERRNO
 
-	cmpld	r3,r11
+3:	cmpld	r3,r11
 	ld	r5,_CCR(r1)
 	bge-	.Lsyscall_error
 .Lsyscall_error_cont:
@@ -267,40 +277,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 	std	r5,_CCR(r1)
 	b	.Lsyscall_error_cont
 
-.Lsyscall_restore_math:
-	/*
-	 * Some initial tests from restore_math to avoid the heavyweight
-	 * C code entry and MSR manipulations.
-	 */
-	LOAD_REG_IMMEDIATE(r0, MSR_TS_MASK)
-	and.	r0,r0,r8
-	bne	1f
-
-	ld	r7,PACACURRENT(r13)
-	lbz	r0,THREAD+THREAD_LOAD_FP(r7)
-#ifdef CONFIG_ALTIVEC
-	lbz	r6,THREAD+THREAD_LOAD_VEC(r7)
-	add	r0,r0,r6
-#endif
-	cmpdi	r0,0
-	beq	.Lsyscall_restore_math_cont
-
-1:	addi	r3,r1,STACK_FRAME_OVERHEAD
-#ifdef CONFIG_PPC_BOOK3S
-	li	r10,MSR_RI
-	mtmsrd	r10,1		/* Restore RI */
-#endif
-	bl	restore_math
-#ifdef CONFIG_PPC_BOOK3S
-	li	r11,0
-	mtmsrd	r11,1
-#endif
-	/* Restore volatiles, reload MSR from updated one */
-	ld	r8,_MSR(r1)
-	ld	r3,RESULT(r1)
-	li	r11,-MAX_ERRNO
-	b	.Lsyscall_restore_math_cont
-
 /* Traced system call support */
 .Lsyscall_dotrace:
 	bl	save_nvgprs

@@ -511,10 +511,6 @@ void restore_math(struct pt_regs *regs)
 {
 	unsigned long msr;
 
-	/*
-	 * Syscall exit makes a similar initial check before branching
-	 * to restore_math. Keep them in synch.
-	 */
 	if (!msr_tm_active(regs->msr) &&
 	    !current->thread.load_fp && !loadvec(current->thread))
 		return;

@@ -351,7 +351,7 @@ static void nmi_ipi_lock_start(unsigned long *flags)
 	hard_irq_disable();
 	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
 		raw_local_irq_restore(*flags);
-		cpu_relax();
+		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
 		raw_local_irq_save(*flags);
 		hard_irq_disable();
 	}
@@ -360,7 +360,7 @@ static void nmi_ipi_lock_start(unsigned long *flags)
 static void nmi_ipi_lock(void)
 {
 	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
-		cpu_relax();
+		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
 }
 
 static void nmi_ipi_unlock(void)
@@ -475,7 +475,7 @@ int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
 	nmi_ipi_lock_start(&flags);
 	while (nmi_ipi_busy_count) {
 		nmi_ipi_unlock_end(&flags);
-		cpu_relax();
+		spin_until_cond(nmi_ipi_busy_count == 0);
 		nmi_ipi_lock_start(&flags);
 	}
 

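Note: replacing the bare cpu_relax() with spin_until_cond() turns each loop into a test-and-test-and-set acquisition: the atomic cmpxchg is retried only after a plain read has seen the lock free, so waiters spin in their own cache instead of bouncing the lock line. The same shape as a self-contained C11 sketch:

    #include <stdatomic.h>

    static atomic_int lock;

    static void lock_acquire(void)
    {
            while (atomic_exchange_explicit(&lock, 1, memory_order_acquire)) {
                    while (atomic_load_explicit(&lock, memory_order_relaxed))
                            ;   /* read-only spin, no write traffic */
            }
    }
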
@@ -71,15 +71,20 @@ static inline void wd_smp_lock(unsigned long *flags)
 	 * This may be called from low level interrupt handlers at some
 	 * point in future.
 	 */
-	local_irq_save(*flags);
-	while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock)))
-		cpu_relax();
+	raw_local_irq_save(*flags);
+	hard_irq_disable(); /* Make it soft-NMI safe */
+	while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock))) {
+		raw_local_irq_restore(*flags);
+		spin_until_cond(!test_bit(0, &__wd_smp_lock));
+		raw_local_irq_save(*flags);
+		hard_irq_disable();
+	}
 }
 
 static inline void wd_smp_unlock(unsigned long *flags)
 {
 	clear_bit_unlock(0, &__wd_smp_lock);
-	local_irq_restore(*flags);
+	raw_local_irq_restore(*flags);
 }
 
 static void wd_lockup_ipi(struct pt_regs *regs)
@@ -96,10 +101,10 @@ static void wd_lockup_ipi(struct pt_regs *regs)
 		nmi_panic(regs, "Hard LOCKUP");
 }
 
-static void set_cpu_stuck(int cpu, u64 tb)
+static void set_cpumask_stuck(const struct cpumask *cpumask, u64 tb)
 {
-	cpumask_set_cpu(cpu, &wd_smp_cpus_stuck);
-	cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);
+	cpumask_or(&wd_smp_cpus_stuck, &wd_smp_cpus_stuck, cpumask);
+	cpumask_andnot(&wd_smp_cpus_pending, &wd_smp_cpus_pending, cpumask);
 	if (cpumask_empty(&wd_smp_cpus_pending)) {
 		wd_smp_last_reset_tb = tb;
 		cpumask_andnot(&wd_smp_cpus_pending,
@@ -107,6 +112,10 @@ static void set_cpu_stuck(int cpu, u64 tb)
 			       &wd_smp_cpus_stuck);
 	}
 }
+static void set_cpu_stuck(int cpu, u64 tb)
+{
+	set_cpumask_stuck(cpumask_of(cpu), tb);
+}
 
 static void watchdog_smp_panic(int cpu, u64 tb)
 {
@@ -135,11 +144,9 @@ static void watchdog_smp_panic(int cpu, u64 tb)
 	}
 	smp_flush_nmi_ipi(1000000);
 
-	/* Take the stuck CPU out of the watch group */
-	for_each_cpu(c, &wd_smp_cpus_pending)
-		set_cpu_stuck(c, tb);
+	/* Take the stuck CPUs out of the watch group */
+	set_cpumask_stuck(&wd_smp_cpus_pending, tb);
 
-out:
 	wd_smp_unlock(&flags);
 
 	printk_safe_flush();
@@ -152,6 +159,11 @@ out:
 
 	if (hardlockup_panic)
 		nmi_panic(NULL, "Hard LOCKUP");
+
+	return;
+
+out:
+	wd_smp_unlock(&flags);
 }
 
 static void wd_smp_clear_cpu_pending(int cpu, u64 tb)
@@ -258,9 +270,11 @@ static void wd_timer_fn(unsigned long data)
 
 void arch_touch_nmi_watchdog(void)
 {
+	unsigned long ticks = tb_ticks_per_usec * wd_timer_period_ms * 1000;
 	int cpu = smp_processor_id();
 
-	watchdog_timer_interrupt(cpu);
+	if (get_tb() - per_cpu(wd_timer_tb, cpu) >= ticks)
+		watchdog_timer_interrupt(cpu);
 }
 EXPORT_SYMBOL(arch_touch_nmi_watchdog);
 
@@ -283,6 +297,8 @@ static void stop_watchdog_timer_on(unsigned int cpu)
 
 static int start_wd_on_cpu(unsigned int cpu)
 {
+	unsigned long flags;
+
 	if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) {
 		WARN_ON(1);
 		return 0;
@@ -297,12 +313,14 @@ static int start_wd_on_cpu(unsigned int cpu)
 	if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
 		return 0;
 
+	wd_smp_lock(&flags);
 	cpumask_set_cpu(cpu, &wd_cpus_enabled);
 	if (cpumask_weight(&wd_cpus_enabled) == 1) {
 		cpumask_set_cpu(cpu, &wd_smp_cpus_pending);
 		wd_smp_last_reset_tb = get_tb();
 	}
-	smp_wmb();
+	wd_smp_unlock(&flags);
+
 	start_watchdog_timer_on(cpu);
 
 	return 0;
@@ -310,12 +328,17 @@ static int start_wd_on_cpu(unsigned int cpu)
 
 static int stop_wd_on_cpu(unsigned int cpu)
 {
+	unsigned long flags;
+
 	if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
 		return 0; /* Can happen in CPU unplug case */
 
 	stop_watchdog_timer_on(cpu);
 
+	wd_smp_lock(&flags);
 	cpumask_clear_cpu(cpu, &wd_cpus_enabled);
+	wd_smp_unlock(&flags);
+
 	wd_smp_clear_cpu_pending(cpu, get_tb());
 
 	return 0;

@@ -56,6 +56,7 @@ u64 pnv_first_deep_stop_state = MAX_STOP_STATE;
  */
 static u64 pnv_deepest_stop_psscr_val;
 static u64 pnv_deepest_stop_psscr_mask;
+static u64 pnv_deepest_stop_flag;
 static bool deepest_stop_found;
 
 static int pnv_save_sprs_for_deep_states(void)
@@ -185,8 +186,40 @@ static void pnv_alloc_idle_core_states(void)
 
 	update_subcore_sibling_mask();
 
-	if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT)
-		pnv_save_sprs_for_deep_states();
+	if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) {
+		int rc = pnv_save_sprs_for_deep_states();
+
+		if (likely(!rc))
+			return;
+
+		/*
+		 * The stop-api is unable to restore hypervisor
+		 * resources on wakeup from platform idle states which
+		 * lose full context. So disable such states.
+		 */
+		supported_cpuidle_states &= ~OPAL_PM_LOSE_FULL_CONTEXT;
+		pr_warn("cpuidle-powernv: Disabling idle states that lose full context\n");
+		pr_warn("cpuidle-powernv: Idle power-savings, CPU-Hotplug affected\n");
+
+		if (cpu_has_feature(CPU_FTR_ARCH_300) &&
+		    (pnv_deepest_stop_flag & OPAL_PM_LOSE_FULL_CONTEXT)) {
+			/*
+			 * Use the default stop state for CPU-Hotplug
+			 * if available.
+			 */
+			if (default_stop_found) {
+				pnv_deepest_stop_psscr_val =
+					pnv_default_stop_val;
+				pnv_deepest_stop_psscr_mask =
+					pnv_default_stop_mask;
+				pr_warn("cpuidle-powernv: Offlined CPUs will stop with psscr = 0x%016llx\n",
+					pnv_deepest_stop_psscr_val);
+			} else { /* Fallback to snooze loop for CPU-Hotplug */
+				deepest_stop_found = false;
+				pr_warn("cpuidle-powernv: Offlined CPUs will busy wait\n");
+			}
+		}
+	}
 }
 
 u32 pnv_get_supported_cpuidle_states(void)
@@ -375,7 +408,8 @@ unsigned long pnv_cpu_offline(unsigned int cpu)
 				pnv_deepest_stop_psscr_val;
 		srr1 = power9_idle_stop(psscr);
 
-	} else if (idle_states & OPAL_PM_WINKLE_ENABLED) {
+	} else if ((idle_states & OPAL_PM_WINKLE_ENABLED) &&
+		   (idle_states & OPAL_PM_LOSE_FULL_CONTEXT)) {
 		srr1 = power7_idle_insn(PNV_THREAD_WINKLE);
 	} else if ((idle_states & OPAL_PM_SLEEP_ENABLED) ||
 		   (idle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
@@ -553,6 +587,7 @@ static int __init pnv_power9_idle_init(struct device_node *np, u32 *flags,
 			max_residency_ns = residency_ns[i];
 			pnv_deepest_stop_psscr_val = psscr_val[i];
 			pnv_deepest_stop_psscr_mask = psscr_mask[i];
+			pnv_deepest_stop_flag = flags[i];
 			deepest_stop_found = true;
 		}
 

@@ -47,10 +47,9 @@ struct mmu_table_batch {
 extern void tlb_table_flush(struct mmu_gather *tlb);
 extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
 
-static inline void tlb_gather_mmu(struct mmu_gather *tlb,
-				  struct mm_struct *mm,
-				  unsigned long start,
-				  unsigned long end)
+static inline void
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+		    unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 	tlb->start = start;
@@ -76,9 +75,15 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 	tlb_flush_mmu_free(tlb);
 }
 
-static inline void tlb_finish_mmu(struct mmu_gather *tlb,
-				  unsigned long start, unsigned long end)
+static inline void
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+		    unsigned long start, unsigned long end, bool force)
 {
+	if (force) {
+		tlb->start = start;
+		tlb->end = end;
+	}
+
 	tlb_flush_mmu(tlb);
 }
 

@@ -1253,7 +1253,8 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
 		insn_count = bpf_jit_insn(jit, fp, i);
 		if (insn_count < 0)
 			return -1;
-		jit->addrs[i + 1] = jit->prg; /* Next instruction address */
+		/* Next instruction address */
+		jit->addrs[i + insn_count] = jit->prg;
 	}
 	bpf_jit_epilogue(jit);
 

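Note: the bug fixed above was assuming every eBPF instruction occupies one slot; a BPF_LD|BPF_IMM|BPF_DW instruction takes two, so the JITed address of the next instruction belongs at i + insn_count. Hedged sketch of the bookkeeping (addrs[] needs prog_len + 1 entries):

    static void record_addrs(int *addrs, const int *slots, int prog_len)
    {
            int i, prg = 0;

            for (i = 0; i < prog_len; i += slots[i]) {
                    prg += 8;                     /* pretend code was emitted */
                    addrs[i + slots[i]] = prg;    /* next insn, not always i + 1 */
            }
    }
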
@@ -36,7 +36,8 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+		    unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 	tlb->start = start;
@@ -47,9 +48,10 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
 }
 
 static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+		    unsigned long start, unsigned long end, bool force)
 {
-	if (tlb->fullmm)
+	if (tlb->fullmm || force)
 		flush_tlb_mm(tlb->mm);
 
 	/* keep the page table cache within bounds */

@@ -47,10 +47,26 @@
 #define SUN4V_CHIP_NIAGARA5	0x05
 #define SUN4V_CHIP_SPARC_M6	0x06
 #define SUN4V_CHIP_SPARC_M7	0x07
+#define SUN4V_CHIP_SPARC_M8	0x08
 #define SUN4V_CHIP_SPARC64X	0x8a
 #define SUN4V_CHIP_SPARC_SN	0x8b
 #define SUN4V_CHIP_UNKNOWN	0xff
 
+/*
+ * The following CPU_ID_xxx constants are used
+ * to identify the CPU type in the setup phase
+ * (see head_64.S)
+ */
+#define CPU_ID_NIAGARA1		('1')
+#define CPU_ID_NIAGARA2		('2')
+#define CPU_ID_NIAGARA3		('3')
+#define CPU_ID_NIAGARA4		('4')
+#define CPU_ID_NIAGARA5		('5')
+#define CPU_ID_M6		('6')
+#define CPU_ID_M7		('7')
+#define CPU_ID_M8		('8')
+#define CPU_ID_SONOMA1		('N')
+
 #ifndef __ASSEMBLY__
 
 enum ultra_tlb_layout {

@@ -506,6 +506,12 @@ static void __init sun4v_cpu_probe(void)
 		sparc_pmu_type = "sparc-m7";
 		break;
 
+	case SUN4V_CHIP_SPARC_M8:
+		sparc_cpu_type = "SPARC-M8";
+		sparc_fpu_type = "SPARC-M8 integrated FPU";
+		sparc_pmu_type = "sparc-m8";
+		break;
+
 	case SUN4V_CHIP_SPARC_SN:
 		sparc_cpu_type = "SPARC-SN";
 		sparc_fpu_type = "SPARC-SN integrated FPU";

@@ -328,6 +328,7 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
 	case SUN4V_CHIP_NIAGARA5:
 	case SUN4V_CHIP_SPARC_M6:
 	case SUN4V_CHIP_SPARC_M7:
+	case SUN4V_CHIP_SPARC_M8:
 	case SUN4V_CHIP_SPARC_SN:
 	case SUN4V_CHIP_SPARC64X:
 		rover_inc_table = niagara_iterate_method;

@@ -424,22 +424,25 @@ EXPORT_SYMBOL(sun4v_chip_type)
 	 nop
 
 70:	ldub	[%g1 + 7], %g2
-	cmp	%g2, '3'
+	cmp	%g2, CPU_ID_NIAGARA3
 	be,pt	%xcc, 5f
 	 mov	SUN4V_CHIP_NIAGARA3, %g4
-	cmp	%g2, '4'
+	cmp	%g2, CPU_ID_NIAGARA4
 	be,pt	%xcc, 5f
 	 mov	SUN4V_CHIP_NIAGARA4, %g4
-	cmp	%g2, '5'
+	cmp	%g2, CPU_ID_NIAGARA5
 	be,pt	%xcc, 5f
 	 mov	SUN4V_CHIP_NIAGARA5, %g4
-	cmp	%g2, '6'
+	cmp	%g2, CPU_ID_M6
 	be,pt	%xcc, 5f
 	 mov	SUN4V_CHIP_SPARC_M6, %g4
-	cmp	%g2, '7'
+	cmp	%g2, CPU_ID_M7
 	be,pt	%xcc, 5f
 	 mov	SUN4V_CHIP_SPARC_M7, %g4
-	cmp	%g2, 'N'
+	cmp	%g2, CPU_ID_M8
 	be,pt	%xcc, 5f
+	 mov	SUN4V_CHIP_SPARC_M8, %g4
+	cmp	%g2, CPU_ID_SONOMA1
+	be,pt	%xcc, 5f
 	 mov	SUN4V_CHIP_SPARC_SN, %g4
 	ba,pt	%xcc, 49f
@@ -448,10 +451,10 @@ EXPORT_SYMBOL(sun4v_chip_type)
 91:	sethi	%hi(prom_cpu_compatible), %g1
 	or	%g1, %lo(prom_cpu_compatible), %g1
 	ldub	[%g1 + 17], %g2
-	cmp	%g2, '1'
+	cmp	%g2, CPU_ID_NIAGARA1
 	be,pt	%xcc, 5f
 	 mov	SUN4V_CHIP_NIAGARA1, %g4
-	cmp	%g2, '2'
+	cmp	%g2, CPU_ID_NIAGARA2
 	be,pt	%xcc, 5f
 	 mov	SUN4V_CHIP_NIAGARA2, %g4
 
@@ -600,6 +603,9 @@ niagara_tlb_fixup:
 	be,pt	%xcc, niagara4_patch
 	 nop
 	cmp	%g1, SUN4V_CHIP_SPARC_M7
 	be,pt	%xcc, niagara4_patch
 	 nop
+	cmp	%g1, SUN4V_CHIP_SPARC_M8
+	be,pt	%xcc, niagara4_patch
+	 nop
 	cmp	%g1, SUN4V_CHIP_SPARC_SN

@@ -288,10 +288,17 @@ static void __init sun4v_patch(void)
 
 	sun4v_patch_2insn_range(&__sun4v_2insn_patch,
 				&__sun4v_2insn_patch_end);
-	if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
-	    sun4v_chip_type == SUN4V_CHIP_SPARC_SN)
+
+	switch (sun4v_chip_type) {
+	case SUN4V_CHIP_SPARC_M7:
+	case SUN4V_CHIP_SPARC_M8:
+	case SUN4V_CHIP_SPARC_SN:
 		sun_m7_patch_2insn_range(&__sun_m7_2insn_patch,
 					 &__sun_m7_2insn_patch_end);
+		break;
+	default:
+		break;
+	}
 
 	sun4v_hvapi_init();
 }
@@ -529,6 +536,7 @@ static void __init init_sparc64_elf_hwcap(void)
 	     sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
 	     sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
 	     sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+	     sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
 	     sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
 	     sun4v_chip_type == SUN4V_CHIP_SPARC64X)
 		cap |= HWCAP_SPARC_BLKINIT;
@@ -538,6 +546,7 @@ static void __init init_sparc64_elf_hwcap(void)
 	     sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
 	     sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
 	     sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+	     sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
 	     sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
 	     sun4v_chip_type == SUN4V_CHIP_SPARC64X)
 		cap |= HWCAP_SPARC_N2;
@@ -568,6 +577,7 @@ static void __init init_sparc64_elf_hwcap(void)
 		     sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
 		     sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
 		     sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+		     sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
 		     sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
 		     sun4v_chip_type == SUN4V_CHIP_SPARC64X)
 			cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
@@ -578,6 +588,7 @@ static void __init init_sparc64_elf_hwcap(void)
 		     sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
 		     sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
 		     sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+		     sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
 		     sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
 		     sun4v_chip_type == SUN4V_CHIP_SPARC64X)
 			cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |

@@ -1944,12 +1944,22 @@ static void __init setup_page_offset(void)
 		break;
 	case SUN4V_CHIP_SPARC_M7:
 	case SUN4V_CHIP_SPARC_SN:
-	default:
 		/* M7 and later support 52-bit virtual addresses. */
 		sparc64_va_hole_top =    0xfff8000000000000UL;
 		sparc64_va_hole_bottom = 0x0008000000000000UL;
 		max_phys_bits = 49;
 		break;
+	case SUN4V_CHIP_SPARC_M8:
+	default:
+		/* M8 and later support 54-bit virtual addresses.
+		 * However, restricting M8 and above VA bits to 53
+		 * as 4-level page table cannot support more than
+		 * 53 VA bits.
+		 */
+		sparc64_va_hole_top =    0xfff0000000000000UL;
+		sparc64_va_hole_bottom = 0x0010000000000000UL;
+		max_phys_bits = 51;
+		break;
 	}
 }
 
@@ -2161,6 +2171,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
 	 */
 	switch (sun4v_chip_type) {
 	case SUN4V_CHIP_SPARC_M7:
+	case SUN4V_CHIP_SPARC_M8:
 	case SUN4V_CHIP_SPARC_SN:
 		pagecv_flag = 0x00;
 		break;
@@ -2313,6 +2324,7 @@ void __init paging_init(void)
 	 */
 	switch (sun4v_chip_type) {
 	case SUN4V_CHIP_SPARC_M7:
+	case SUN4V_CHIP_SPARC_M8:
 	case SUN4V_CHIP_SPARC_SN:
 		page_cache4v_flag = _PAGE_CP_4V;
 		break;

@@ -45,7 +45,8 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+		    unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 	tlb->start = start;
@@ -80,13 +81,19 @@ tlb_flush_mmu(struct mmu_gather *tlb)
 	tlb_flush_mmu_free(tlb);
 }
 
-/* tlb_finish_mmu
+/* arch_tlb_finish_mmu
  * Called at the end of the shootdown operation to free up any resources
  * that were required.
  */
 static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+		    unsigned long start, unsigned long end, bool force)
 {
+	if (force) {
+		tlb->start = start;
+		tlb->end = end;
+		tlb->need_flush = 1;
+	}
 	tlb_flush_mmu(tlb);
 
 	/* keep the page table cache within bounds */

@@ -43,6 +43,9 @@ struct hypervisor_x86 {
 
 	/* pin current vcpu to specified physical cpu (run rarely) */
 	void (*pin_vcpu)(int);
+
+	/* called during init_mem_mapping() to setup early mappings. */
+	void (*init_mem_mapping)(void);
 };
 
 extern const struct hypervisor_x86 *x86_hyper;
@@ -57,8 +60,15 @@ extern const struct hypervisor_x86 x86_hyper_kvm;
 extern void init_hypervisor_platform(void);
 extern bool hypervisor_x2apic_available(void);
 extern void hypervisor_pin_vcpu(int cpu);
+
+static inline void hypervisor_init_mem_mapping(void)
+{
+	if (x86_hyper && x86_hyper->init_mem_mapping)
+		x86_hyper->init_mem_mapping();
+}
 #else
 static inline void init_hypervisor_platform(void) { }
 static inline bool hypervisor_x2apic_available(void) { return false; }
+static inline void hypervisor_init_mem_mapping(void) { }
 #endif /* CONFIG_HYPERVISOR_GUEST */
 #endif /* _ASM_X86_HYPERVISOR_H */

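Note: init_mem_mapping is an optional callback, hence the double check in the inline helper above. The guard pattern in isolation:

    struct ops { void (*hook)(void); };

    static void call_hook(const struct ops *o)
    {
            if (o && o->hook)   /* object and member are both optional */
                    o->hook();
    }
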
@@ -18,6 +18,7 @@
 #include <asm/dma.h>		/* for MAX_DMA_PFN */
 #include <asm/microcode.h>
 #include <asm/kaslr.h>
+#include <asm/hypervisor.h>
 
 /*
  * We need to define the tracepoints somewhere, and tlb.c
@@ -636,6 +637,8 @@ void __init init_mem_mapping(void)
 	load_cr3(swapper_pg_dir);
 	__flush_tlb_all();
 
+	hypervisor_init_mem_mapping();
+
 	early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
 }
 

@@ -12,6 +12,7 @@
 #include <asm/setup.h>
 #include <asm/hypervisor.h>
 #include <asm/e820/api.h>
+#include <asm/early_ioremap.h>
 
 #include <asm/xen/cpuid.h>
 #include <asm/xen/hypervisor.h>
@@ -21,38 +22,50 @@
 #include "mmu.h"
 #include "smp.h"
 
-void __ref xen_hvm_init_shared_info(void)
+static unsigned long shared_info_pfn;
+
+void xen_hvm_init_shared_info(void)
 {
 	struct xen_add_to_physmap xatp;
-	u64 pa;
-
-	if (HYPERVISOR_shared_info == &xen_dummy_shared_info) {
-		/*
-		 * Search for a free page starting at 4kB physical address.
-		 * Low memory is preferred to avoid an EPT large page split up
-		 * by the mapping.
-		 * Starting below X86_RESERVE_LOW (usually 64kB) is fine as
-		 * the BIOS used for HVM guests is well behaved and won't
-		 * clobber memory other than the first 4kB.
-		 */
-		for (pa = PAGE_SIZE;
-		     !e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) ||
-		     memblock_is_reserved(pa);
-		     pa += PAGE_SIZE)
-			;
-
-		memblock_reserve(pa, PAGE_SIZE);
-		HYPERVISOR_shared_info = __va(pa);
-	}
 
 	xatp.domid = DOMID_SELF;
 	xatp.idx = 0;
 	xatp.space = XENMAPSPACE_shared_info;
-	xatp.gpfn = virt_to_pfn(HYPERVISOR_shared_info);
+	xatp.gpfn = shared_info_pfn;
 	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
 		BUG();
 }
 
+static void __init reserve_shared_info(void)
+{
+	u64 pa;
+
+	/*
+	 * Search for a free page starting at 4kB physical address.
+	 * Low memory is preferred to avoid an EPT large page split up
+	 * by the mapping.
+	 * Starting below X86_RESERVE_LOW (usually 64kB) is fine as
+	 * the BIOS used for HVM guests is well behaved and won't
+	 * clobber memory other than the first 4kB.
+	 */
+	for (pa = PAGE_SIZE;
+	     !e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) ||
+	     memblock_is_reserved(pa);
+	     pa += PAGE_SIZE)
+		;
+
+	shared_info_pfn = PHYS_PFN(pa);
+
+	memblock_reserve(pa, PAGE_SIZE);
+	HYPERVISOR_shared_info = early_memremap(pa, PAGE_SIZE);
+}
+
+static void __init xen_hvm_init_mem_mapping(void)
+{
+	early_memunmap(HYPERVISOR_shared_info, PAGE_SIZE);
+	HYPERVISOR_shared_info = __va(PFN_PHYS(shared_info_pfn));
+}
+
 static void __init init_hvm_pv_info(void)
 {
 	int major, minor;
@@ -153,6 +166,7 @@ static void __init xen_hvm_guest_init(void)
 
 	init_hvm_pv_info();
 
+	reserve_shared_info();
 	xen_hvm_init_shared_info();
 
 	/*
@@ -218,5 +232,6 @@ const struct hypervisor_x86 x86_hyper_xen_hvm = {
 	.init_platform		= xen_hvm_guest_init,
 	.pin_vcpu		= xen_pin_vcpu,
 	.x2apic_available	= xen_x2apic_para_available,
+	.init_mem_mapping	= xen_hvm_init_mem_mapping,
 };
 EXPORT_SYMBOL(x86_hyper_xen_hvm);

@@ -1,5 +1,6 @@
 generic-y += bug.h
 generic-y += clkdev.h
+generic-y += device.h
 generic-y += div64.h
 generic-y += dma-contiguous.h
 generic-y += emergency-restart.h
@@ -17,6 +18,7 @@ generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
+generic-y += param.h
 generic-y += percpu.h
 generic-y += preempt.h
 generic-y += rwsem.h

@@ -1,15 +0,0 @@
-/*
- * Arch specific extensions to struct device
- *
- * This file is released under the GPLv2
- */
-#ifndef _ASM_XTENSA_DEVICE_H
-#define _ASM_XTENSA_DEVICE_H
-
-struct dev_archdata {
-};
-
-struct pdev_archdata {
-};
-
-#endif /* _ASM_XTENSA_DEVICE_H */

@@ -1,18 +0,0 @@
-/*
- * include/asm-xtensa/param.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-#ifndef _XTENSA_PARAM_H
-#define _XTENSA_PARAM_H
-
-#include <uapi/asm/param.h>
-
-# define HZ		CONFIG_HZ	/* internal timer frequency */
-# define USER_HZ	100		/* for user interfaces in "ticks" */
-# define CLOCKS_PER_SEC	(USER_HZ)	/* frequnzy at which times() counts */
-#endif /* _XTENSA_PARAM_H */

@@ -94,13 +94,11 @@ unsigned long __sync_fetch_and_or_4(unsigned long *p, unsigned long v)
 }
 EXPORT_SYMBOL(__sync_fetch_and_or_4);
 
-#ifdef CONFIG_NET
 /*
  * Networking support
  */
 EXPORT_SYMBOL(csum_partial);
 EXPORT_SYMBOL(csum_partial_copy_generic);
-#endif /* CONFIG_NET */
 
 /*
  * Architecture-specific symbols

@@ -103,6 +103,7 @@ void clear_user_highpage(struct page *page, unsigned long vaddr)
 	clear_page_alias(kvaddr, paddr);
 	preempt_enable();
 }
+EXPORT_SYMBOL(clear_user_highpage);
 
 void copy_user_highpage(struct page *dst, struct page *src,
 			unsigned long vaddr, struct vm_area_struct *vma)
@@ -119,10 +120,7 @@ void copy_user_highpage(struct page *dst, struct page *src,
 	copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
 	preempt_enable();
 }
-
-#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
-
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+EXPORT_SYMBOL(copy_user_highpage);
 
 /*
  * Any time the kernel writes to a user page cache page, or it is about to
@@ -176,7 +174,7 @@ void flush_dcache_page(struct page *page)
 
 	/* There shouldn't be an entry in the cache for this page anymore. */
 }
-
+EXPORT_SYMBOL(flush_dcache_page);
 
 /*
  * For now, flush the whole cache. FIXME??
@@ -188,6 +186,7 @@ void local_flush_cache_range(struct vm_area_struct *vma,
 	__flush_invalidate_dcache_all();
 	__invalidate_icache_all();
 }
+EXPORT_SYMBOL(local_flush_cache_range);
 
 /*
  * Remove any entry in the cache for this page.
@@ -207,8 +206,9 @@ void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
 	__flush_invalidate_dcache_page_alias(virt, phys);
 	__invalidate_icache_page_alias(virt, phys);
 }
+EXPORT_SYMBOL(local_flush_cache_page);
 
-#endif
+#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
 
 void
 update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
@@ -225,7 +225,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
 
 	flush_tlb_page(vma, addr);
 
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
 
 	if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
 		unsigned long phys = page_to_phys(page);
@@ -256,7 +256,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
 	 * flush_dcache_page() on the page.
 	 */
 
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
 
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		unsigned long vaddr, void *dst, const void *src,

@@ -71,17 +71,29 @@ struct bfq_service_tree {
  *
  * bfq_sched_data is the basic scheduler queue. It supports three
  * ioprio_classes, and can be used either as a toplevel queue or as an
- * intermediate queue on a hierarchical setup. @next_in_service
- * points to the active entity of the sched_data service trees that
- * will be scheduled next. It is used to reduce the number of steps
- * needed for each hierarchical-schedule update.
+ * intermediate queue in a hierarchical setup.
  *
  * The supported ioprio_classes are the same as in CFQ, in descending
  * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
  * Requests from higher priority queues are served before all the
  * requests from lower priority queues; among requests of the same
  * queue requests are served according to B-WF2Q+.
- * All the fields are protected by the queue lock of the containing bfqd.
+ *
+ * The schedule is implemented by the service trees, plus the field
+ * @next_in_service, which points to the entity on the active trees
+ * that will be served next, if 1) no changes in the schedule occurs
+ * before the current in-service entity is expired, 2) the in-service
+ * queue becomes idle when it expires, and 3) if the entity pointed by
+ * in_service_entity is not a queue, then the in-service child entity
+ * of the entity pointed by in_service_entity becomes idle on
+ * expiration. This peculiar definition allows for the following
+ * optimization, not yet exploited: while a given entity is still in
+ * service, we already know which is the best candidate for next
+ * service among the other active entitities in the same parent
+ * entity. We can then quickly compare the timestamps of the
+ * in-service entity with those of such best candidate.
+ *
+ * All fields are protected by the lock of the containing bfqd.
  */
 struct bfq_sched_data {
 	/* entity in service */

146
block/bfq-wf2q.c
146
block/bfq-wf2q.c
|
@ -188,21 +188,23 @@ static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
|
|||
|
||||
/*
|
||||
* This function tells whether entity stops being a candidate for next
|
||||
* service, according to the following logic.
|
||||
* service, according to the restrictive definition of the field
|
||||
* next_in_service. In particular, this function is invoked for an
|
||||
* entity that is about to be set in service.
|
||||
*
|
||||
* This function is invoked for an entity that is about to be set in
|
||||
* service. If such an entity is a queue, then the entity is no longer
|
||||
* a candidate for next service (i.e, a candidate entity to serve
|
||||
* after the in-service entity is expired). The function then returns
|
||||
* true.
|
||||
* If entity is a queue, then the entity is no longer a candidate for
|
||||
* next service according to the that definition, because entity is
|
||||
* about to become the in-service queue. This function then returns
|
||||
* true if entity is a queue.
|
||||
*
|
||||
* In contrast, the entity could stil be a candidate for next service
|
||||
* if it is not a queue, and has more than one child. In fact, even if
|
||||
* one of its children is about to be set in service, other children
|
||||
* may still be the next to serve. As a consequence, a non-queue
|
||||
* entity is not a candidate for next-service only if it has only one
|
||||
* child. And only if this condition holds, then the function returns
|
||||
* true for a non-queue entity.
|
||||
* In contrast, entity could still be a candidate for next service if
|
||||
* it is not a queue, and has more than one active child. In fact,
|
||||
* even if one of its children is about to be set in service, other
|
||||
* active children may still be the next to serve, for the parent
|
||||
* entity, even according to the above definition. As a consequence, a
|
||||
* non-queue entity is not a candidate for next-service only if it has
|
||||
* only one active child. And only if this condition holds, then this
|
||||
* function returns true for a non-queue entity.
|
||||
*/
|
||||
static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
|
||||
{
|
||||
|
@ -213,6 +215,18 @@ static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
|
|||
|
||||
bfqg = container_of(entity, struct bfq_group, entity);
|
||||
|
||||
/*
|
||||
* The field active_entities does not always contain the
|
||||
* actual number of active children entities: it happens to
|
||||
* not account for the in-service entity in case the latter is
|
||||
* removed from its active tree (which may get done after
|
||||
* invoking the function bfq_no_longer_next_in_service in
|
||||
* bfq_get_next_queue). Fortunately, here, i.e., while
|
||||
* bfq_no_longer_next_in_service is not yet completed in
|
||||
* bfq_get_next_queue, bfq_active_extract has not yet been
|
||||
* invoked, and thus active_entities still coincides with the
|
||||
* actual number of active entities.
|
||||
*/
|
||||
if (bfqg->active_entities == 1)
|
||||
return true;
|
||||
|
||||
|
@ -954,7 +968,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
|
|||
* one of its children receives a new request.
|
||||
*
|
||||
* Basically, this function updates the timestamps of entity and
|
||||
* inserts entity into its active tree, ater possible extracting it
|
||||
* inserts entity into its active tree, ater possibly extracting it
|
||||
* from its idle tree.
|
||||
*/
|
||||
static void __bfq_activate_entity(struct bfq_entity *entity,
|
||||
|
@ -1048,7 +1062,7 @@ static void __bfq_requeue_entity(struct bfq_entity *entity)
|
|||
entity->start = entity->finish;
|
||||
/*
|
||||
* In addition, if the entity had more than one child
|
||||
* when set in service, then was not extracted from
|
||||
* when set in service, then it was not extracted from
|
||||
* the active tree. This implies that the position of
|
||||
* the entity in the active tree may need to be
|
||||
* changed now, because we have just updated the start
|
||||
|
@ -1056,9 +1070,8 @@ static void __bfq_requeue_entity(struct bfq_entity *entity)
|
|||
* time in a moment (the requeueing is then, more
|
||||
* precisely, a repositioning in this case). To
|
||||
* implement this repositioning, we: 1) dequeue the
|
||||
* entity here, 2) update the finish time and
|
||||
* requeue the entity according to the new
|
||||
* timestamps below.
|
||||
* entity here, 2) update the finish time and requeue
|
||||
* the entity according to the new timestamps below.
|
||||
*/
|
||||
if (entity->tree)
|
||||
bfq_active_extract(st, entity);
|
||||
|
@@ -1105,9 +1118,10 @@ static void __bfq_activate_requeue_entity(struct bfq_entity *entity,
 
 /**
- * bfq_activate_entity - activate or requeue an entity representing a bfq_queue,
- *			 and activate, requeue or reposition all ancestors
- *			 for which such an update becomes necessary.
+ * bfq_activate_requeue_entity - activate or requeue an entity representing a
+ *				 bfq_queue, and activate, requeue or reposition
+ *				 all ancestors for which such an update becomes
+ *				 necessary.
  * @entity: the entity to activate.
  * @non_blocking_wait_rq: true if this entity was waiting for a request
  * @requeue: true if this is a requeue, which implies that bfqq is

@@ -1135,9 +1149,9 @@ static void bfq_activate_requeue_entity(struct bfq_entity *entity,
  * @ins_into_idle_tree: if false, the entity will not be put into the
  *			idle tree.
  *
- * Deactivates an entity, independently from its previous state. Must
+ * Deactivates an entity, independently of its previous state. Must
  * be invoked only if entity is on a service tree. Extracts the entity
- * from that tree, and if necessary and allowed, puts it on the idle
+ * from that tree, and if necessary and allowed, puts it into the idle
  * tree.
  */
 bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)

@@ -1158,8 +1172,10 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
 	st = bfq_entity_service_tree(entity);
 	is_in_service = entity == sd->in_service_entity;
 
-	if (is_in_service)
+	if (is_in_service) {
 		bfq_calc_finish(entity, entity->service);
+		sd->in_service_entity = NULL;
+	}
 
 	if (entity->tree == &st->active)
 		bfq_active_extract(st, entity);

@@ -1177,7 +1193,7 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
 /**
  * bfq_deactivate_entity - deactivate an entity representing a bfq_queue.
  * @entity: the entity to deactivate.
- * @ins_into_idle_tree: true if the entity can be put on the idle tree
+ * @ins_into_idle_tree: true if the entity can be put into the idle tree
  */
 static void bfq_deactivate_entity(struct bfq_entity *entity,
 				  bool ins_into_idle_tree,

@@ -1208,16 +1224,29 @@ static void bfq_deactivate_entity(struct bfq_entity *entity,
 		 */
 		bfq_update_next_in_service(sd, NULL);
 
-		if (sd->next_in_service)
+		if (sd->next_in_service || sd->in_service_entity) {
 			/*
-			 * The parent entity is still backlogged,
-			 * because next_in_service is not NULL. So, no
-			 * further upwards deactivation must be
-			 * performed. Yet, next_in_service has
-			 * changed. Then the schedule does need to be
-			 * updated upwards.
+			 * The parent entity is still active, because
+			 * either next_in_service or in_service_entity
+			 * is not NULL. So, no further upwards
+			 * deactivation must be performed. Yet,
+			 * next_in_service has changed. Then the
+			 * schedule does need to be updated upwards.
+			 *
+			 * NOTE If in_service_entity is not NULL, then
+			 * next_in_service may happen to be NULL,
+			 * although the parent entity is evidently
+			 * active. This happens if 1) the entity
+			 * pointed by in_service_entity is the only
+			 * active entity in the parent entity, and 2)
+			 * according to the definition of
+			 * next_in_service, the in_service_entity
+			 * cannot be considered as
+			 * next_in_service. See the comments on the
+			 * definition of next_in_service for details.
			 */
 			break;
+		}
 
 		/*
 		 * If we get here, then the parent is no more

@@ -1494,47 +1523,34 @@ struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
 
 		/*
 		 * If entity is no longer a candidate for next
-		 * service, then we extract it from its active tree,
-		 * for the following reason. To further boost the
-		 * throughput in some special case, BFQ needs to know
-		 * which is the next candidate entity to serve, while
-		 * there is already an entity in service. In this
-		 * respect, to make it easy to compute/update the next
-		 * candidate entity to serve after the current
-		 * candidate has been set in service, there is a case
-		 * where it is necessary to extract the current
-		 * candidate from its service tree. Such a case is
-		 * when the entity just set in service cannot be also
-		 * a candidate for next service. Details about when
-		 * this conditions holds are reported in the comments
-		 * on the function bfq_no_longer_next_in_service()
-		 * invoked below.
+		 * service, then it must be extracted from its active
+		 * tree, so as to make sure that it won't be
+		 * considered when computing next_in_service. See the
+		 * comments on the function
+		 * bfq_no_longer_next_in_service() for details.
 		 */
 		if (bfq_no_longer_next_in_service(entity))
 			bfq_active_extract(bfq_entity_service_tree(entity),
 					   entity);
 
 		/*
-		 * For the same reason why we may have just extracted
-		 * entity from its active tree, we may need to update
-		 * next_in_service for the sched_data of entity too,
-		 * regardless of whether entity has been extracted.
-		 * In fact, even if entity has not been extracted, a
-		 * descendant entity may get extracted. Such an event
-		 * would cause a change in next_in_service for the
-		 * level of the descendant entity, and thus possibly
-		 * back to upper levels.
+		 * Even if entity is not to be extracted according to
+		 * the above check, a descendant entity may get
+		 * extracted in one of the next iterations of this
+		 * loop. Such an event could cause a change in
+		 * next_in_service for the level of the descendant
+		 * entity, and thus possibly back to this level.
 		 *
-		 * We cannot perform the resulting needed update
-		 * before the end of this loop, because, to know which
-		 * is the correct next-to-serve candidate entity for
-		 * each level, we need first to find the leaf entity
-		 * to set in service. In fact, only after we know
-		 * which is the next-to-serve leaf entity, we can
-		 * discover whether the parent entity of the leaf
-		 * entity becomes the next-to-serve, and so on.
+		 * However, we cannot perform the resulting needed
+		 * update of next_in_service for this level before the
+		 * end of the whole loop, because, to know which is
+		 * the correct next-to-serve candidate entity for each
+		 * level, we need first to find the leaf entity to set
+		 * in service. In fact, only after we know which is
+		 * the next-to-serve leaf entity, we can discover
+		 * whether the parent entity of the leaf entity
+		 * becomes the next-to-serve, and so on.
 		 */
-
 	}
 
 	bfqq = bfq_entity_to_bfqq(entity);
@@ -387,9 +387,11 @@ static void bio_integrity_verify_fn(struct work_struct *work)
  */
 bool __bio_integrity_endio(struct bio *bio)
 {
-	if (bio_op(bio) == REQ_OP_READ && !bio->bi_status) {
-		struct bio_integrity_payload *bip = bio_integrity(bio);
+	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
+	struct bio_integrity_payload *bip = bio_integrity(bio);
 
+	if (bio_op(bio) == REQ_OP_READ && !bio->bi_status &&
+	    (bip->bip_flags & BIP_BLOCK_INTEGRITY) && bi->profile->verify_fn) {
 		INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
 		queue_work(kintegrityd_wq, &bip->bip_work);
 		return false;
@@ -301,11 +301,12 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 	struct elevator_queue *e = q->elevator;
 	struct request *rq;
 	unsigned int tag;
+	struct blk_mq_ctx *local_ctx = NULL;
 
 	blk_queue_enter_live(q);
 	data->q = q;
 	if (likely(!data->ctx))
-		data->ctx = blk_mq_get_ctx(q);
+		data->ctx = local_ctx = blk_mq_get_ctx(q);
 	if (likely(!data->hctx))
 		data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
 	if (op & REQ_NOWAIT)

@@ -324,6 +325,10 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 
 	tag = blk_mq_get_tag(data);
 	if (tag == BLK_MQ_TAG_FAIL) {
+		if (local_ctx) {
+			blk_mq_put_ctx(local_ctx);
+			data->ctx = NULL;
+		}
 		blk_queue_exit(q);
 		return NULL;
 	}

@@ -356,12 +361,12 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
 
 	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
 
-	blk_mq_put_ctx(alloc_data.ctx);
-	blk_queue_exit(q);
-
 	if (!rq)
 		return ERR_PTR(-EWOULDBLOCK);
 
+	blk_mq_put_ctx(alloc_data.ctx);
+	blk_queue_exit(q);
+
 	rq->__data_len = 0;
 	rq->__sector = (sector_t) -1;
 	rq->bio = rq->biotail = NULL;

@@ -407,11 +412,11 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 
 	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
 
-	blk_queue_exit(q);
-
 	if (!rq)
 		return ERR_PTR(-EWOULDBLOCK);
 
+	blk_queue_exit(q);
+
 	return rq;
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

@@ -679,8 +684,8 @@ EXPORT_SYMBOL(blk_mq_kick_requeue_list);
 void blk_mq_delay_kick_requeue_list(struct request_queue *q,
 				    unsigned long msecs)
 {
-	kblockd_schedule_delayed_work(&q->requeue_work,
-				      msecs_to_jiffies(msecs));
+	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
+				    msecs_to_jiffies(msecs));
 }
 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
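For context, here is the ownership rule the blk-mq fix above enforces, as a minimal user-space sketch: a helper that may either receive a context from its caller or acquire one itself must release only what it acquired locally on failure, and must clear the pointer so the caller cannot double-free it. All names and types below are illustrative, not from the patch:

#include <stdio.h>
#include <stdlib.h>

struct ctx { int cpu; };

static struct ctx *get_ctx(void) { return malloc(sizeof(struct ctx)); }
static void put_ctx(struct ctx *c) { free(c); }

/* data->ctx may be pre-set by the caller, mirroring blk_mq_alloc_data */
struct alloc_data { struct ctx *ctx; };

static int get_request(struct alloc_data *data, int fail)
{
	struct ctx *local_ctx = NULL;

	if (!data->ctx)
		data->ctx = local_ctx = get_ctx();

	if (fail) {			/* "tag allocation" failed */
		if (local_ctx) {	/* drop only our own reference */
			put_ctx(local_ctx);
			data->ctx = NULL;
		}
		return -1;
	}
	return 0;
}

int main(void)
{
	struct alloc_data data = { .ctx = NULL };

	if (get_request(&data, 1))
		printf("failed, ctx=%p (no leak, no double free)\n",
		       (void *)data.ctx);
	return 0;
}

The same reasoning explains why the two callers above now put the ctx and exit the queue only on the success path.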
@@ -16,6 +16,16 @@
 #include <linux/kernel.h>
 #include <linux/serial_core.h>
 
+/*
+ * Erratum 44 for QDF2432v1 and QDF2400v1 SoCs describes the BUSY bit as
+ * occasionally getting stuck as 1. To avoid the potential for a hang, check
+ * TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART
+ * implementations, so only do so if an affected platform is detected in
+ * parse_spcr().
+ */
+bool qdf2400_e44_present;
+EXPORT_SYMBOL(qdf2400_e44_present);
+
 /*
  * Some Qualcomm Datacenter Technologies SoCs have a defective UART BUSY bit.
  * Detect them by examining the OEM fields in the SPCR header, similar to PCI

@@ -147,8 +157,30 @@ int __init parse_spcr(bool earlycon)
 		goto done;
 	}
 
-	if (qdf2400_erratum_44_present(&table->header))
-		uart = "qdf2400_e44";
+	/*
+	 * If the E44 erratum is required, then we need to tell the pl011
+	 * driver to implement the work-around.
+	 *
+	 * The global variable is used by the probe function when it
+	 * creates the UARTs, whether or not they're used as a console.
+	 *
+	 * If the user specifies "traditional" earlycon, the qdf2400_e44
+	 * console name matches the EARLYCON_DECLARE() statement, and
+	 * SPCR is not used. Parameter "earlycon" is false.
+	 *
+	 * If the user specifies "SPCR" earlycon, then we need to update
+	 * the console name so that it also says "qdf2400_e44". Parameter
+	 * "earlycon" is true.
+	 *
+	 * For consistency, if we change the console name, then we do it
+	 * for everyone, not just earlycon.
+	 */
+	if (qdf2400_erratum_44_present(&table->header)) {
+		qdf2400_e44_present = true;
+		if (earlycon)
+			uart = "qdf2400_e44";
+	}
 
 	if (xgene_8250_erratum_present(table))
 		iotype = "mmio32";
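The detection itself is not shown in the hunk above; a sketch of the pattern, reconstructed from memory of the upstream helper and therefore only illustrative (field widths and ID strings are assumptions), looks like this:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct acpi_table_header {
	char oem_id[6];
	char oem_table_id[8];
	unsigned int oem_revision;
};

static bool qdf2400_e44_present;

/* match the SPCR's OEM fields against the affected QDF24xx parts */
static bool qdf2400_erratum_44_present(struct acpi_table_header *h)
{
	if (memcmp(h->oem_id, "QCOM  ", 6))
		return false;
	if (!memcmp(h->oem_table_id, "QDF2432 ", 8))
		return true;
	if (!memcmp(h->oem_table_id, "QDF2400 ", 8) && h->oem_revision == 1)
		return true;
	return false;
}

int main(void)
{
	struct acpi_table_header h = { "QCOM  ", "QDF2400 ", 1 };

	qdf2400_e44_present = qdf2400_erratum_44_present(&h);
	printf("qdf2400_e44_present=%d\n", qdf2400_e44_present);
	return 0;
}

The point of the patch is that the latch is now global: the pl011 probe path can consult it even when the UART is not the console.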
@@ -30,7 +30,6 @@
 #include <linux/syscore_ops.h>
 #include <linux/reboot.h>
 #include <linux/security.h>
-#include <linux/swait.h>
 
 #include <generated/utsrelease.h>

@@ -112,13 +111,13 @@ static inline long firmware_loading_timeout(void)
  * state of the firmware loading.
  */
 struct fw_state {
-	struct swait_queue_head wq;
+	struct completion completion;
 	enum fw_status status;
 };
 
 static void fw_state_init(struct fw_state *fw_st)
 {
-	init_swait_queue_head(&fw_st->wq);
+	init_completion(&fw_st->completion);
 	fw_st->status = FW_STATUS_UNKNOWN;
 }

@@ -131,9 +130,7 @@ static int __fw_state_wait_common(struct fw_state *fw_st, long timeout)
 {
 	long ret;
 
-	ret = swait_event_interruptible_timeout(fw_st->wq,
-				__fw_state_is_done(READ_ONCE(fw_st->status)),
-				timeout);
+	ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout);
 	if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
 		return -ENOENT;
 	if (!ret)

@@ -148,35 +145,34 @@ static void __fw_state_set(struct fw_state *fw_st,
 	WRITE_ONCE(fw_st->status, status);
 
 	if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
-		swake_up(&fw_st->wq);
+		complete_all(&fw_st->completion);
 }
 
 #define fw_state_start(fw_st)					\
 	__fw_state_set(fw_st, FW_STATUS_LOADING)
 #define fw_state_done(fw_st)					\
 	__fw_state_set(fw_st, FW_STATUS_DONE)
+#define fw_state_aborted(fw_st)					\
+	__fw_state_set(fw_st, FW_STATUS_ABORTED)
 #define fw_state_wait(fw_st)					\
 	__fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT)
 
-#ifndef CONFIG_FW_LOADER_USER_HELPER
-
-#define fw_state_is_aborted(fw_st)	false
-
-#else /* CONFIG_FW_LOADER_USER_HELPER */
-
 static int __fw_state_check(struct fw_state *fw_st, enum fw_status status)
 {
 	return fw_st->status == status;
 }
 
+#define fw_state_is_aborted(fw_st)				\
+	__fw_state_check(fw_st, FW_STATUS_ABORTED)
+
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+
-#define fw_state_aborted(fw_st)					\
-	__fw_state_set(fw_st, FW_STATUS_ABORTED)
 #define fw_state_is_done(fw_st)					\
 	__fw_state_check(fw_st, FW_STATUS_DONE)
 #define fw_state_is_loading(fw_st)				\
 	__fw_state_check(fw_st, FW_STATUS_LOADING)
-#define fw_state_is_aborted(fw_st)				\
-	__fw_state_check(fw_st, FW_STATUS_ABORTED)
 #define fw_state_wait_timeout(fw_st, timeout)			\
 	__fw_state_wait_common(fw_st, timeout)

@@ -1200,6 +1196,28 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
 	return 1; /* need to load */
 }
 
+/*
+ * Batched requests need only one wake, we need to do this step last due to the
+ * fallback mechanism. The buf is protected with kref_get(), and it won't be
+ * released until the last user calls release_firmware().
+ *
+ * Failed batched requests are possible as well, in such cases we just share
+ * the struct firmware_buf and won't release it until all requests are woken
+ * and have gone through this same path.
+ */
+static void fw_abort_batch_reqs(struct firmware *fw)
+{
+	struct firmware_buf *buf;
+
+	/* Loaded directly? */
+	if (!fw || !fw->priv)
+		return;
+
+	buf = fw->priv;
+	if (!fw_state_is_aborted(&buf->fw_st))
+		fw_state_aborted(&buf->fw_st);
+}
+
 /* called from request_firmware() and request_firmware_work_func() */
 static int
 _request_firmware(const struct firmware **firmware_p, const char *name,

@@ -1243,6 +1261,7 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
 
  out:
 	if (ret < 0) {
+		fw_abort_batch_reqs(fw);
 		release_firmware(fw);
 		fw = NULL;
 	}
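Why move from a swait queue to a completion? swake_up() wakes a single waiter, while batched firmware requests can have several sleepers sharing one buffer; complete_all() releases every one of them. A self-contained pthread analogue of that semantic difference, with all names here being illustrative:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
};

static struct completion fw_done = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false
};

static void complete_all(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = true;
	pthread_cond_broadcast(&c->cond);	/* wake *all* waiters */
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static void *waiter(void *arg)
{
	wait_for_completion(&fw_done);
	printf("batched request %ld woken\n", (long)arg);
	return NULL;
}

int main(void)
{
	pthread_t t[3];
	long i;

	for (i = 0; i < 3; i++)		/* three batched requests */
		pthread_create(&t[i], NULL, waiter, (void *)i);
	complete_all(&fw_done);		/* one load completes them all */
	for (i = 0; i < 3; i++)
		pthread_join(t[i], NULL);
	return 0;
}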
@@ -875,6 +875,56 @@ static void print_version(void)
 	printk(KERN_INFO "%s", version);
 }
 
+struct vdc_check_port_data {
+	int	dev_no;
+	char	*type;
+};
+
+static int vdc_device_probed(struct device *dev, void *arg)
+{
+	struct vio_dev *vdev = to_vio_dev(dev);
+	struct vdc_check_port_data *port_data;
+
+	port_data = (struct vdc_check_port_data *)arg;
+
+	if ((vdev->dev_no == port_data->dev_no) &&
+	    (!(strcmp((char *)&vdev->type, port_data->type))) &&
+	    dev_get_drvdata(dev)) {
+		/* This device has already been configured
+		 * by vdc_port_probe()
+		 */
+		return 1;
+	} else {
+		return 0;
+	}
+}
+
+/* Determine whether the VIO device is part of an mpgroup
+ * by locating all the virtual-device-port nodes associated
+ * with the parent virtual-device node for the VIO device
+ * and checking whether any of these nodes are vdc-ports
+ * which have already been configured.
+ *
+ * Returns true if this device is part of an mpgroup and has
+ * already been probed.
+ */
+static bool vdc_port_mpgroup_check(struct vio_dev *vdev)
+{
+	struct vdc_check_port_data port_data;
+	struct device *dev;
+
+	port_data.dev_no = vdev->dev_no;
+	port_data.type = (char *)&vdev->type;
+
+	dev = device_find_child(vdev->dev.parent, &port_data,
+				vdc_device_probed);
+
+	if (dev)
+		return true;
+
+	return false;
+}
+
 static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 {
 	struct mdesc_handle *hp;

@@ -893,6 +943,14 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 		goto err_out_release_mdesc;
 	}
 
+	/* Check if this device is part of an mpgroup */
+	if (vdc_port_mpgroup_check(vdev)) {
+		printk(KERN_WARNING
+			"VIO: Ignoring extra vdisk port %s",
+			dev_name(&vdev->dev));
+		goto err_out_release_mdesc;
+	}
+
 	port = kzalloc(sizeof(*port), GFP_KERNEL);
 	err = -ENOMEM;
 	if (!port) {

@@ -943,6 +1001,9 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	if (err)
 		goto err_out_free_tx_ring;
 
+	/* Note that the device driver_data is used to determine
+	 * whether the port has been probed.
+	 */
 	dev_set_drvdata(&vdev->dev, port);
 
 	mdesc_release(hp);
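The duplicate-port detection above is a plain "walk the siblings with a match callback" pattern: a non-zero return from the callback stops the walk and reports the already-probed twin. A minimal stand-alone sketch of that pattern, with simplified types that are not the kernel's:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct dev {
	int dev_no;
	const char *type;
	void *drvdata;		/* non-NULL once probed */
};

struct check_port_data {
	int dev_no;
	const char *type;
};

static int device_probed(struct dev *d, void *arg)
{
	struct check_port_data *pd = arg;

	return d->dev_no == pd->dev_no && !strcmp(d->type, pd->type) &&
	       d->drvdata;	/* already configured by an earlier probe */
}

/* analogue of device_find_child(): first child the callback accepts */
static struct dev *find_child(struct dev *children, size_t n, void *arg,
			      int (*match)(struct dev *, void *))
{
	size_t i;

	for (i = 0; i < n; i++)
		if (match(&children[i], arg))
			return &children[i];
	return NULL;
}

int main(void)
{
	struct dev ports[] = {
		{ 0, "vdc-port", (void *)1 },	/* first path, probed */
		{ 0, "vdc-port", NULL },	/* mpgroup twin, probing now */
	};
	struct check_port_data pd = { 0, "vdc-port" };

	if (find_child(ports, 2, &pd, device_probed))
		puts("ignoring extra vdisk port (mpgroup member)");
	return 0;
}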
@@ -308,7 +308,7 @@ static ssize_t comp_algorithm_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t len)
 {
 	struct zram *zram = dev_to_zram(dev);
-	char compressor[CRYPTO_MAX_ALG_NAME];
+	char compressor[ARRAY_SIZE(zram->compressor)];
 	size_t sz;
 
 	strlcpy(compressor, buf, sizeof(compressor));

@@ -327,7 +327,7 @@ static ssize_t comp_algorithm_store(struct device *dev,
 		return -EBUSY;
 	}
 
-	strlcpy(zram->compressor, compressor, sizeof(compressor));
+	strcpy(zram->compressor, compressor);
 	up_write(&zram->init_lock);
 	return len;
 }
@@ -1492,7 +1492,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
 #ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
 	print_once = true;
 #endif
-	pr_notice("random: %s called from %pF with crng_init=%d\n",
+	pr_notice("random: %s called from %pS with crng_init=%d\n",
 		  func_name, caller, crng_init);
 }
@@ -235,6 +235,7 @@ static inline int validate_dt_prop_sizes(const char *prop1, int prop1_len,
 	return -1;
 }
 
+extern u32 pnv_get_supported_cpuidle_states(void);
 static int powernv_add_idle_states(void)
 {
 	struct device_node *power_mgt;

@@ -248,6 +249,8 @@ static int powernv_add_idle_states(void)
 	const char *names[CPUIDLE_STATE_MAX];
 	u32 has_stop_states = 0;
 	int i, rc;
+	u32 supported_flags = pnv_get_supported_cpuidle_states();
+
 
 	/* Currently we have snooze statically defined */

@@ -362,6 +365,13 @@ static int powernv_add_idle_states(void)
 	for (i = 0; i < dt_idle_states; i++) {
 		unsigned int exit_latency, target_residency;
 		bool stops_timebase = false;
+
+		/*
+		 * Skip the platform idle state whose flag isn't in
+		 * the supported_cpuidle_states flag mask.
+		 */
+		if ((flags[i] & supported_flags) != flags[i])
+			continue;
 		/*
 		 * If an idle state has exit latency beyond
 		 * POWERNV_THRESHOLD_LATENCY_NS then don't use it
@@ -883,10 +883,7 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
 	if (ret)
 		return ret;
 
-	memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
-	memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);
-
-	for (i = 0; i < ARRAY_SIZE(istate.state); i++) {
+	for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
 		if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
 		    ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
 			ctx->base.needs_inv = true;

@@ -894,6 +891,9 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
 		}
 	}
 
+	memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
+	memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);
+
 	return 0;
 }
@@ -304,7 +304,7 @@ static int sync_file_release(struct inode *inode, struct file *file)
 {
 	struct sync_file *sync_file = file->private_data;
 
-	if (test_bit(POLL_ENABLED, &sync_file->fence->flags))
+	if (test_bit(POLL_ENABLED, &sync_file->flags))
 		dma_fence_remove_callback(sync_file->fence, &sync_file->cb);
 	dma_fence_put(sync_file->fence);
 	kfree(sync_file);

@@ -318,7 +318,8 @@ static unsigned int sync_file_poll(struct file *file, poll_table *wait)
 
 	poll_wait(file, &sync_file->wq, wait);
 
-	if (!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
+	if (list_empty(&sync_file->cb.node) &&
+	    !test_and_set_bit(POLL_ENABLED, &sync_file->flags)) {
 		if (dma_fence_add_callback(sync_file->fence, &sync_file->cb,
 					   fence_check_cb_func) < 0)
 			wake_up_all(&sync_file->wq);
@@ -1255,7 +1255,7 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
 
 	/* port@2 is the output port */
 	ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &tc->panel, NULL);
-	if (ret)
+	if (ret && ret != -ENODEV)
 		return ret;
 
 	/* Shut down GPIO is optional */
@@ -270,8 +270,8 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
 		if (ret)
 			return ret;
 
-		if (r->reloc_offset >= bo->obj->base.size - sizeof(*ptr)) {
-			DRM_ERROR("relocation %u outside object", i);
+		if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) {
+			DRM_ERROR("relocation %u outside object\n", i);
 			return -EINVAL;
 		}
@@ -145,13 +145,19 @@ static struct drm_framebuffer *
 exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 		      const struct drm_mode_fb_cmd2 *mode_cmd)
 {
+	const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd);
 	struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
 	struct drm_gem_object *obj;
 	struct drm_framebuffer *fb;
 	int i;
 	int ret;
 
-	for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
+	for (i = 0; i < info->num_planes; i++) {
+		unsigned int height = (i == 0) ? mode_cmd->height :
+				     DIV_ROUND_UP(mode_cmd->height, info->vsub);
+		unsigned long size = height * mode_cmd->pitches[i] +
+				     mode_cmd->offsets[i];
+
 		obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
 		if (!obj) {
 			DRM_ERROR("failed to lookup gem object\n");

@@ -160,6 +166,12 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 		}
 
 		exynos_gem[i] = to_exynos_gem(obj);
+
+		if (size > exynos_gem[i]->size) {
+			i++;
+			ret = -EINVAL;
+			goto err;
+		}
 	}
 
 	fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i);
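A worked example of the per-plane size check added above, for an NV12-style format where the chroma plane is vertically subsampled (vsub = 2) and so needs only DIV_ROUND_UP(height, vsub) rows. The dimensions are illustrative:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int height = 1080, vsub = 2;
	unsigned int pitches[2] = { 1920, 1920 };
	unsigned int offsets[2] = { 0, 0 };
	unsigned int i;

	for (i = 0; i < 2; i++) {
		unsigned int rows = (i == 0) ? height
					     : DIV_ROUND_UP(height, vsub);
		unsigned long size = (unsigned long)rows * pitches[i] +
				     offsets[i];
		printf("plane %u: %lu bytes required\n", i, size);
	}
	return 0;	/* 2073600 and 1036800 bytes */
}

A GEM object smaller than the computed requirement is now rejected instead of being scanned out past its end.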
@@ -46,6 +46,8 @@
 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
 		((a)->lrca == (b)->lrca))
 
+static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask);
+
 static int context_switch_events[] = {
 	[RCS] = RCS_AS_CONTEXT_SWITCH,
 	[BCS] = BCS_AS_CONTEXT_SWITCH,

@@ -499,10 +501,10 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
-	struct intel_vgpu_execlist *execlist =
-		&vgpu->execlist[workload->ring_id];
+	int ring_id = workload->ring_id;
+	struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
 	struct intel_vgpu_workload *next_workload;
-	struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next;
+	struct list_head *next = workload_q_head(vgpu, ring_id)->next;
 	bool lite_restore = false;
 	int ret;

@@ -512,10 +514,25 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 	release_shadow_batch_buffer(workload);
 	release_shadow_wa_ctx(&workload->wa_ctx);
 
-	if (workload->status || vgpu->resetting)
+	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
+		/* if workload->status is not successful, it means the HW GPU
+		 * has hit a hang or something is wrong with i915/GVT,
+		 * and GVT won't inject a context switch interrupt to the
+		 * guest. So this error is actually a vGPU hang to the guest.
+		 * According to this we should emulate a vGPU hang. If
+		 * there are pending workloads which are already submitted
+		 * from guest, we should clean them up like HW GPU does.
+		 *
+		 * if it is in middle of engine resetting, the pending
+		 * workloads won't be submitted to HW GPU and will be
+		 * cleaned up during the resetting process later, so doing
+		 * the workload clean up here doesn't have any impact.
+		 **/
+		clean_workloads(vgpu, ENGINE_MASK(ring_id));
 		goto out;
+	}
 
-	if (!list_empty(workload_q_head(vgpu, workload->ring_id))) {
+	if (!list_empty(workload_q_head(vgpu, ring_id))) {
 		struct execlist_ctx_descriptor_format *this_desc, *next_desc;
 
 		next_workload = container_of(next,
@@ -72,11 +72,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
 	struct intel_gvt_device_info *info = &gvt->device_info;
 	struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
 	struct intel_gvt_mmio_info *e;
+	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+	int num = gvt->mmio.num_mmio_block;
 	struct gvt_firmware_header *h;
 	void *firmware;
 	void *p;
 	unsigned long size, crc32_start;
-	int i;
+	int i, j;
 	int ret;
 
 	size = sizeof(*h) + info->mmio_size + info->cfg_space_size;

@@ -105,6 +107,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
 	hash_for_each(gvt->mmio.mmio_info_table, i, e, node)
 		*(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset));
 
+	for (i = 0; i < num; i++, block++) {
+		for (j = 0; j < block->size; j += 4)
+			*(u32 *)(p + INTEL_GVT_MMIO_OFFSET(block->offset) + j) =
+				I915_READ_NOTRACE(_MMIO(INTEL_GVT_MMIO_OFFSET(
+					block->offset) + j));
+	}
+
 	memcpy(gvt->firmware.mmio, p, info->mmio_size);
 
 	crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
@@ -149,7 +149,7 @@ struct intel_vgpu {
 	bool active;
 	bool pv_notified;
 	bool failsafe;
-	bool resetting;
+	unsigned int resetting_eng;
 	void *sched_data;
 	struct vgpu_sched_ctl sched_ctl;

@@ -195,6 +195,15 @@ struct intel_gvt_fence {
 	unsigned long vgpu_allocated_fence_num;
 };
 
+/* Special MMIO blocks. */
+struct gvt_mmio_block {
+	unsigned int device;
+	i915_reg_t   offset;
+	unsigned int size;
+	gvt_mmio_func read;
+	gvt_mmio_func write;
+};
+
 #define INTEL_GVT_MMIO_HASH_BITS 11
 
 struct intel_gvt_mmio {

@@ -214,6 +223,9 @@ struct intel_gvt_mmio {
 /* This reg could be accessed by unaligned address */
 #define F_UNALIGN	(1 << 6)
 
+	struct gvt_mmio_block *mmio_block;
+	unsigned int num_mmio_block;
+
 	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
 	unsigned int num_tracked_mmio;
 };
@@ -2857,31 +2857,15 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	return 0;
 }
 
-/* Special MMIO blocks. */
-static struct gvt_mmio_block {
-	unsigned int device;
-	i915_reg_t   offset;
-	unsigned int size;
-	gvt_mmio_func read;
-	gvt_mmio_func write;
-} gvt_mmio_blocks[] = {
-	{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
-	{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
-	{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
-		pvinfo_mmio_read, pvinfo_mmio_write},
-	{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
-	{D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
-	{D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
-};
-
 static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
 					      unsigned int offset)
 {
 	unsigned long device = intel_gvt_get_device_type(gvt);
-	struct gvt_mmio_block *block = gvt_mmio_blocks;
+	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+	int num = gvt->mmio.num_mmio_block;
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(gvt_mmio_blocks); i++, block++) {
+	for (i = 0; i < num; i++, block++) {
 		if (!(device & block->device))
 			continue;
 		if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) &&

@@ -2912,6 +2896,17 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
 	gvt->mmio.mmio_attribute = NULL;
 }
 
+/* Special MMIO blocks. */
+static struct gvt_mmio_block mmio_blocks[] = {
+	{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
+	{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
+	{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
+		pvinfo_mmio_read, pvinfo_mmio_write},
+	{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
+	{D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
+	{D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
+};
+
 /**
  * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
  * @gvt: GVT device

@@ -2951,6 +2946,9 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
 		goto err;
 	}
 
+	gvt->mmio.mmio_block = mmio_blocks;
+	gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);
+
 	gvt_dbg_mmio("traced %u virtual mmio registers\n",
 		     gvt->mmio.num_tracked_mmio);
 	return 0;

@@ -3030,7 +3028,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
 	gvt_mmio_func func;
 	int ret;
 
-	if (WARN_ON(bytes > 4))
+	if (WARN_ON(bytes > 8))
 		return -EINVAL;
 
 	/*
@@ -432,7 +432,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
 	i915_gem_request_put(fetch_and_zero(&workload->req));
 
-	if (!workload->status && !vgpu->resetting) {
+	if (!workload->status && !(vgpu->resetting_eng &
+				   ENGINE_MASK(ring_id))) {
 		update_guest_context(workload);
 
 		for_each_set_bit(event, workload->pending_events,
@@ -480,11 +480,13 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+	unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
 
 	gvt_dbg_core("------------------------------------------\n");
 	gvt_dbg_core("resetting vgpu%d, dmlr %d, engine_mask %08x\n",
 		     vgpu->id, dmlr, engine_mask);
-	vgpu->resetting = true;
+
+	vgpu->resetting_eng = resetting_eng;
 
 	intel_vgpu_stop_schedule(vgpu);
 	/*

@@ -497,7 +499,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 		mutex_lock(&gvt->lock);
 	}
 
-	intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);
+	intel_vgpu_reset_execlist(vgpu, resetting_eng);
 
 	/* full GPU reset or device model level reset */
 	if (engine_mask == ALL_ENGINES || dmlr) {

@@ -520,7 +522,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 		}
 	}
 
-	vgpu->resetting = false;
+	vgpu->resetting_eng = 0;
 	gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
 	gvt_dbg_core("------------------------------------------\n");
 }
@@ -43,16 +43,21 @@ static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
 		return true;
 
 	case MUTEX_TRYLOCK_FAILED:
-		return false;
-
-	case MUTEX_TRYLOCK_SUCCESS:
-		*unlock = true;
-		return true;
+		*unlock = false;
+		preempt_disable();
+		do {
+			cpu_relax();
+			if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
+				*unlock = true;
+				break;
+			}
+		} while (!need_resched());
+		preempt_enable();
+		return *unlock;
+
+	case MUTEX_TRYLOCK_SUCCESS:
+		*unlock = true;
+		return true;
 	}
 
 	BUG();
@@ -1601,11 +1601,11 @@ static int gen8_emit_oa_config(struct drm_i915_gem_request *req)
 	u32 *cs;
 	int i;
 
-	cs = intel_ring_begin(req, n_flex_regs * 2 + 4);
+	cs = intel_ring_begin(req, ARRAY_SIZE(flex_mmio) * 2 + 4);
 	if (IS_ERR(cs))
 		return PTR_ERR(cs);
 
-	*cs++ = MI_LOAD_REGISTER_IMM(n_flex_regs + 1);
+	*cs++ = MI_LOAD_REGISTER_IMM(ARRAY_SIZE(flex_mmio) + 1);
 
 	*cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
 	*cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
@@ -398,6 +398,7 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
 	}
 
 	/* Program the max register to clamp values > 1.0. */
+	i = lut_size - 1;
 	I915_WRITE(PREC_PAL_GC_MAX(pipe, 0),
 		   drm_color_lut_extract(lut[i].red, 16));
 	I915_WRITE(PREC_PAL_GC_MAX(pipe, 1),
@@ -469,7 +469,7 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
 
 	if (i915.invert_brightness > 0 ||
 	    dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
-		return panel->backlight.max - val;
+		return panel->backlight.max - val + panel->backlight.min;
 	}
 
 	return val;
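A quick numeric check of the inversion fix above, with illustrative limits min = 10 and max = 100: the old formula mapped val = max to 0, below the panel's minimum, while the corrected one keeps the inverted value inside [min, max] and maps min to max and max to min:

#include <stdio.h>

static unsigned int invert(unsigned int val, unsigned int min,
			   unsigned int max)
{
	return max - val + min;	/* the fixed formula */
}

int main(void)
{
	unsigned int min = 10, max = 100;

	printf("val=%u -> %u\n", min, invert(min, min, max));	/* 100 */
	printf("val=%u -> %u\n", max, invert(max, min, max));	/* 10 */
	return 0;
}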
@@ -5,7 +5,7 @@ config DRM_MSM
 	depends on ARCH_QCOM || (ARM && COMPILE_TEST)
 	depends on OF && COMMON_CLK
 	depends on MMU
-	select QCOM_MDT_LOADER
+	select QCOM_MDT_LOADER if ARCH_QCOM
 	select REGULATOR
 	select DRM_KMS_HELPER
 	select DRM_PANEL
@@ -15,7 +15,7 @@
 #include <linux/cpumask.h>
 #include <linux/qcom_scm.h>
 #include <linux/dma-mapping.h>
-#include <linux/of_reserved_mem.h>
+#include <linux/of_address.h>
 #include <linux/soc/qcom/mdt_loader.h>
 #include "msm_gem.h"
 #include "msm_mmu.h"

@@ -26,16 +26,34 @@ static void a5xx_dump(struct msm_gpu *gpu);
 
 #define GPU_PAS_ID 13
 
-#if IS_ENABLED(CONFIG_QCOM_MDT_LOADER)
-
 static int zap_shader_load_mdt(struct device *dev, const char *fwname)
 {
 	const struct firmware *fw;
+	struct device_node *np;
+	struct resource r;
 	phys_addr_t mem_phys;
 	ssize_t mem_size;
 	void *mem_region = NULL;
 	int ret;
 
+	if (!IS_ENABLED(CONFIG_ARCH_QCOM))
+		return -EINVAL;
+
+	np = of_get_child_by_name(dev->of_node, "zap-shader");
+	if (!np)
+		return -ENODEV;
+
+	np = of_parse_phandle(np, "memory-region", 0);
+	if (!np)
+		return -EINVAL;
+
+	ret = of_address_to_resource(np, 0, &r);
+	if (ret)
+		return ret;
+
+	mem_phys = r.start;
+	mem_size = resource_size(&r);
+
 	/* Request the MDT file for the firmware */
 	ret = request_firmware(&fw, fwname, dev);
 	if (ret) {

@@ -51,7 +69,7 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
 	}
 
 	/* Allocate memory for the firmware image */
-	mem_region = dmam_alloc_coherent(dev, mem_size, &mem_phys, GFP_KERNEL);
+	mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
 	if (!mem_region) {
 		ret = -ENOMEM;
 		goto out;

@@ -69,16 +87,13 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
 		DRM_DEV_ERROR(dev, "Unable to authorize the image\n");
 
 out:
+	if (mem_region)
+		memunmap(mem_region);
+
 	release_firmware(fw);
 
 	return ret;
 }
-#else
-static int zap_shader_load_mdt(struct device *dev, const char *fwname)
-{
-	return -ENODEV;
-}
-#endif
 
 static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 	struct msm_file_private *ctx)

@@ -117,12 +132,10 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 	gpu->funcs->flush(gpu);
 }
 
-struct a5xx_hwcg {
+static const struct {
 	u32 offset;
 	u32 value;
-};
-
-static const struct a5xx_hwcg a530_hwcg[] = {
+} a5xx_hwcg[] = {
 	{REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
 	{REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
 	{REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},

@@ -217,38 +230,16 @@ static const struct a5xx_hwcg a530_hwcg[] = {
 	{REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
 };
 
-static const struct {
-	int (*test)(struct adreno_gpu *gpu);
-	const struct a5xx_hwcg *regs;
-	unsigned int count;
-} a5xx_hwcg_regs[] = {
-	{ adreno_is_a530, a530_hwcg, ARRAY_SIZE(a530_hwcg), },
-};
-
-static void _a5xx_enable_hwcg(struct msm_gpu *gpu,
-		const struct a5xx_hwcg *regs, unsigned int count)
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
 {
 	unsigned int i;
 
-	for (i = 0; i < count; i++)
-		gpu_write(gpu, regs[i].offset, regs[i].value);
+	for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
+		gpu_write(gpu, a5xx_hwcg[i].offset,
+			state ? a5xx_hwcg[i].value : 0);
 
-	gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xAAA8AA00);
-	gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, 0x182);
-}
-
-static void a5xx_enable_hwcg(struct msm_gpu *gpu)
-{
-	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-	unsigned int i;
-
-	for (i = 0; i < ARRAY_SIZE(a5xx_hwcg_regs); i++) {
-		if (a5xx_hwcg_regs[i].test(adreno_gpu)) {
-			_a5xx_enable_hwcg(gpu, a5xx_hwcg_regs[i].regs,
-				a5xx_hwcg_regs[i].count);
-			return;
-		}
-	}
+	gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
+	gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
 }
 
 static int a5xx_me_init(struct msm_gpu *gpu)

@@ -377,45 +368,6 @@ static int a5xx_zap_shader_resume(struct msm_gpu *gpu)
 	return ret;
 }
 
-/* Set up a child device to "own" the zap shader */
-static int a5xx_zap_shader_dev_init(struct device *parent, struct device *dev)
-{
-	struct device_node *node;
-	int ret;
-
-	if (dev->parent)
-		return 0;
-
-	/* Find the sub-node for the zap shader */
-	node = of_get_child_by_name(parent->of_node, "zap-shader");
-	if (!node) {
-		DRM_DEV_ERROR(parent, "zap-shader not found in device tree\n");
-		return -ENODEV;
-	}
-
-	dev->parent = parent;
-	dev->of_node = node;
-	dev_set_name(dev, "adreno_zap_shader");
-
-	ret = device_register(dev);
-	if (ret) {
-		DRM_DEV_ERROR(parent, "Couldn't register zap shader device\n");
-		goto out;
-	}
-
-	ret = of_reserved_mem_device_init(dev);
-	if (ret) {
-		DRM_DEV_ERROR(parent, "Unable to set up the reserved memory\n");
-		device_unregister(dev);
-	}
-
-out:
-	if (ret)
-		dev->parent = NULL;
-
-	return ret;
-}
-
 static int a5xx_zap_shader_init(struct msm_gpu *gpu)
 {
 	static bool loaded;

@@ -444,11 +396,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu)
 		return -ENODEV;
 	}
 
-	ret = a5xx_zap_shader_dev_init(&pdev->dev, &a5xx_gpu->zap_dev);
-
-	if (!ret)
-		ret = zap_shader_load_mdt(&a5xx_gpu->zap_dev,
-			adreno_gpu->info->zapfw);
+	ret = zap_shader_load_mdt(&pdev->dev, adreno_gpu->info->zapfw);
 
 	loaded = !ret;

@@ -545,7 +493,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
 	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
 
 	/* Enable HWCG */
-	a5xx_enable_hwcg(gpu);
+	a5xx_set_hwcg(gpu, true);
 
 	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);

@@ -691,9 +639,6 @@ static void a5xx_destroy(struct msm_gpu *gpu)
 
 	DBG("%s", gpu->name);
 
-	if (a5xx_gpu->zap_dev.parent)
-		device_unregister(&a5xx_gpu->zap_dev);
-
 	if (a5xx_gpu->pm4_bo) {
 		if (a5xx_gpu->pm4_iova)
 			msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);

@@ -920,31 +865,30 @@ static const u32 a5xx_registers[] = {
 	0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
 	0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
 	0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
-	0x04E0, 0x0533, 0x0540, 0x0555, 0xF400, 0xF400, 0xF800, 0xF807,
-	0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0,
-	0x0B00, 0x0B12, 0x0B15, 0x0B28, 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD,
-	0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, 0x0C60, 0x0C61, 0x0C80, 0x0C82,
-	0x0C84, 0x0C85, 0x0C90, 0x0C98, 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2,
-	0x2180, 0x2185, 0x2580, 0x2585, 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7,
-	0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8,
-	0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, 0x2100, 0x211E, 0x2140, 0x2145,
-	0x2500, 0x251E, 0x2540, 0x2545, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
-	0x0D30, 0x0D30, 0x20C0, 0x20C0, 0x24C0, 0x24C0, 0x0E40, 0x0E43,
-	0x0E4A, 0x0E4A, 0x0E50, 0x0E57, 0x0E60, 0x0E7C, 0x0E80, 0x0E8E,
-	0x0E90, 0x0E96, 0x0EA0, 0x0EA8, 0x0EB0, 0x0EB2, 0xE140, 0xE147,
-	0xE150, 0xE187, 0xE1A0, 0xE1A9, 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7,
-	0xE1D0, 0xE1D1, 0xE200, 0xE201, 0xE210, 0xE21C, 0xE240, 0xE268,
-	0xE000, 0xE006, 0xE010, 0xE09A, 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB,
-	0xE100, 0xE105, 0xE380, 0xE38F, 0xE3B0, 0xE3B0, 0xE400, 0xE405,
-	0xE408, 0xE4E9, 0xE4F0, 0xE4F0, 0xE280, 0xE280, 0xE282, 0xE2A3,
-	0xE2A5, 0xE2C2, 0xE940, 0xE947, 0xE950, 0xE987, 0xE9A0, 0xE9A9,
-	0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, 0xE9D0, 0xE9D1, 0xEA00, 0xEA01,
-	0xEA10, 0xEA1C, 0xEA40, 0xEA68, 0xE800, 0xE806, 0xE810, 0xE89A,
-	0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, 0xE900, 0xE905, 0xEB80, 0xEB8F,
-	0xEBB0, 0xEBB0, 0xEC00, 0xEC05, 0xEC08, 0xECE9, 0xECF0, 0xECF0,
-	0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2, 0xA800, 0xA8FF,
-	0xAC60, 0xAC60, 0xB000, 0xB97F, 0xB9A0, 0xB9BF,
-	~0
+	0x04E0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081A, 0x081F, 0x0841,
+	0x0860, 0x0860, 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0x0B28,
+	0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53,
+	0x0C60, 0x0C61, 0x0C80, 0x0C82, 0x0C84, 0x0C85, 0x0C90, 0x0C98,
+	0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 0x2180, 0x2185, 0x2580, 0x2585,
+	0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8,
+	0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E,
+	0x2100, 0x211E, 0x2140, 0x2145, 0x2500, 0x251E, 0x2540, 0x2545,
+	0x0D10, 0x0D17, 0x0D20, 0x0D23, 0x0D30, 0x0D30, 0x20C0, 0x20C0,
+	0x24C0, 0x24C0, 0x0E40, 0x0E43, 0x0E4A, 0x0E4A, 0x0E50, 0x0E57,
+	0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0x0EA0, 0x0EA8,
+	0x0EB0, 0x0EB2, 0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9,
+	0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 0xE1D0, 0xE1D1, 0xE200, 0xE201,
+	0xE210, 0xE21C, 0xE240, 0xE268, 0xE000, 0xE006, 0xE010, 0xE09A,
+	0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 0xE100, 0xE105, 0xE380, 0xE38F,
+	0xE3B0, 0xE3B0, 0xE400, 0xE405, 0xE408, 0xE4E9, 0xE4F0, 0xE4F0,
+	0xE280, 0xE280, 0xE282, 0xE2A3, 0xE2A5, 0xE2C2, 0xE940, 0xE947,
+	0xE950, 0xE987, 0xE9A0, 0xE9A9, 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7,
+	0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 0xEA10, 0xEA1C, 0xEA40, 0xEA68,
+	0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB,
+	0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05,
+	0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3,
+	0xEAA5, 0xEAC2, 0xA800, 0xA8FF, 0xAC60, 0xAC60, 0xB000, 0xB97F,
+	0xB9A0, 0xB9BF, ~0
 };
 
 static void a5xx_dump(struct msm_gpu *gpu)

@@ -1020,7 +964,14 @@ static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
 {
 	seq_printf(m, "status:   %08x\n",
 			gpu_read(gpu, REG_A5XX_RBBM_STATUS));
+
+	/*
+	 * Temporarily disable hardware clock gating before going into
+	 * adreno_show to avoid issues while reading the registers
+	 */
+	a5xx_set_hwcg(gpu, false);
 	adreno_show(gpu, m);
+	a5xx_set_hwcg(gpu, true);
 }
 #endif
@@ -36,8 +36,6 @@ struct a5xx_gpu {
 	uint32_t gpmu_dwords;
 
 	uint32_t lm_leakage;
-
-	struct device zap_dev;
 };
 
 #define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)

@@ -59,5 +57,6 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
 }
 
 bool a5xx_idle(struct msm_gpu *gpu);
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
 
 #endif /* __A5XX_GPU_H__ */
@@ -48,8 +48,15 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
 		*value = adreno_gpu->base.fast_rate;
 		return 0;
 	case MSM_PARAM_TIMESTAMP:
-		if (adreno_gpu->funcs->get_timestamp)
-			return adreno_gpu->funcs->get_timestamp(gpu, value);
+		if (adreno_gpu->funcs->get_timestamp) {
+			int ret;
+
+			pm_runtime_get_sync(&gpu->pdev->dev);
+			ret = adreno_gpu->funcs->get_timestamp(gpu, value);
+			pm_runtime_put_autosuspend(&gpu->pdev->dev);
+
+			return ret;
+		}
 		return -EINVAL;
 	default:
 		DBG("%s: invalid param: %u", gpu->name, param);
@@ -2137,6 +2137,13 @@ void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
 	struct msm_dsi_phy_clk_request *clk_req)
 {
 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	int ret;
+
+	ret = dsi_calc_clk_rate(msm_host);
+	if (ret) {
+		pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
+		return;
+	}
 
 	clk_req->bitclk_rate = msm_host->byte_clk_rate * 8;
 	clk_req->escclk_rate = msm_host->esc_clk_rate;

@@ -2280,7 +2287,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
 	struct drm_display_mode *mode)
 {
 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
-	int ret;
 
 	if (msm_host->mode) {
 		drm_mode_destroy(msm_host->dev, msm_host->mode);

@@ -2293,12 +2299,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
 		return -ENOMEM;
 	}
 
-	ret = dsi_calc_clk_rate(msm_host);
-	if (ret) {
-		pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
-		return ret;
-	}
-
 	return 0;
 }
@@ -221,8 +221,8 @@ static void blend_setup(struct drm_crtc *crtc)
 	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
 	uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
 	unsigned long flags;
-	enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
-	enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
+	enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
+	enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
 	int i, plane_cnt = 0;
 	bool bg_alpha_enabled = false;
 	u32 mixer_op_mode = 0;

@@ -753,6 +753,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 	if (!handle) {
 		DBG("Cursor off");
 		cursor_enable = false;
+		mdp5_enable(mdp5_kms);
 		goto set_cursor;
 	}

@@ -776,6 +777,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 
 	get_roi(crtc, &roi_w, &roi_h);
 
+	mdp5_enable(mdp5_kms);
+
 	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
 	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
 			MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));

@@ -804,6 +807,7 @@ set_cursor:
 	crtc_flush(crtc, flush_mask);
 
 end:
+	mdp5_disable(mdp5_kms);
 	if (old_bo) {
 		drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
 		/* enable vblank to complete cursor work: */

@@ -836,6 +840,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 
 	get_roi(crtc, &roi_w, &roi_h);
 
+	mdp5_enable(mdp5_kms);
+
 	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
 	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
 			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |

@@ -847,6 +853,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 
 	crtc_flush(crtc, flush_mask);
 
+	mdp5_disable(mdp5_kms);
+
 	return 0;
 }
@@ -299,7 +299,7 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
 	struct mdp5_interface *intf = mdp5_encoder->intf;
 
 	if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
-		mdp5_cmd_encoder_disable(encoder);
+		mdp5_cmd_encoder_enable(encoder);
 	else
 		mdp5_vid_encoder_enable(encoder);
 }
@@ -502,7 +502,7 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
 		const char *name, bool mandatory)
 {
 	struct device *dev = &pdev->dev;
-	struct clk *clk = devm_clk_get(dev, name);
+	struct clk *clk = msm_clk_get(pdev, name);
 	if (IS_ERR(clk) && mandatory) {
 		dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
 		return PTR_ERR(clk);

@@ -887,21 +887,21 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
 	}
 
 	/* mandatory clocks: */
-	ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk", true);
+	ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true);
 	if (ret)
 		goto fail;
-	ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true);
+	ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true);
 	if (ret)
 		goto fail;
-	ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true);
+	ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true);
 	if (ret)
 		goto fail;
-	ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk", true);
+	ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true);
 	if (ret)
 		goto fail;
 
 	/* optional clocks: */
-	get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk", false);
+	get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);
 
 	/* we need to set a default rate before enabling.  Set a safe
 	 * rate first, then figure out hw revision, and then set a
@@ -890,8 +890,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
 	struct mdp5_hw_pipe *right_hwpipe;
 	const struct mdp_format *format;
 	uint32_t nplanes, config = 0;
-	struct phase_step step = { 0 };
-	struct pixel_ext pe = { 0 };
+	struct phase_step step = { { 0 } };
+	struct pixel_ext pe = { { 0 } };
 	uint32_t hdecm = 0, vdecm = 0;
 	uint32_t pix_format;
 	unsigned int rotation;
@@ -383,8 +383,10 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
 		struct page **pages;
 
 		vma = add_vma(obj, aspace);
-		if (IS_ERR(vma))
-			return PTR_ERR(vma);
+		if (IS_ERR(vma)) {
+			ret = PTR_ERR(vma);
+			goto unlock;
+		}
 
 		pages = get_pages(obj);
 		if (IS_ERR(pages)) {

@@ -405,7 +407,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
 
 fail:
 	del_vma(vma);
-
+unlock:
 	mutex_unlock(&msm_obj->lock);
 	return ret;
 }

@@ -928,8 +930,12 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
 	if (use_vram) {
 		struct msm_gem_vma *vma;
 		struct page **pages;
+		struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+		mutex_lock(&msm_obj->lock);
 
 		vma = add_vma(obj, NULL);
+		mutex_unlock(&msm_obj->lock);
 		if (IS_ERR(vma)) {
 			ret = PTR_ERR(vma);
 			goto fail;
@@ -34,8 +34,8 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
 		struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds)
 {
 	struct msm_gem_submit *submit;
-	uint64_t sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
-		(nr_cmds * sizeof(submit->cmd[0]));
+	uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
+		((u64)nr_cmds * sizeof(submit->cmd[0]));
 
 	if (sz > SIZE_MAX)
 		return NULL;

@@ -451,7 +451,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	if (ret)
 		goto out;
 
-	if (!(args->fence & MSM_SUBMIT_NO_IMPLICIT)) {
+	if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) {
 		ret = submit_fence_sync(submit);
 		if (ret)
 			goto out;
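The (u64) casts above matter because the multiplication is otherwise performed at the operands' native width before being widened for the comparison. A self-contained demonstration of the wrap, using a 32-bit element size to stand in for sizeof() on a 32-bit kernel; the values are illustrative:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t nr_bos = 0x20000001;	/* userspace-chosen count */
	uint32_t elem = 8;		/* per-entry size, say */

	/* 32-bit multiply wraps: 0x20000001 * 8 == 8 (mod 2^32) */
	uint64_t wrapped = 4 + (nr_bos * elem);
	/* widening first keeps the true product */
	uint64_t correct = 4 + ((uint64_t)nr_bos * elem);

	printf("wrapped=%llu correct=%llu\n",
	       (unsigned long long)wrapped, (unsigned long long)correct);
	return 0;
}

With the wrap, a tiny allocation would pass the size check while the kernel later indexes far past it; widening before multiplying makes the bounds check meaningful.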
@@ -42,7 +42,7 @@ void
 msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma, struct sg_table *sgt)
 {
-	if (!vma->iova)
+	if (!aspace || !vma->iova)
 		return;
 
 	if (aspace->mmu) {
@@ -267,6 +267,8 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
 	/* Create output path objects for each VBIOS display path. */
 	i = -1;
 	while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
+		if (ver < 0x40) /* No support for chipsets prior to NV50. */
+			break;
 		if (dcbE.type == DCB_OUTPUT_UNUSED)
 			continue;
 		if (dcbE.type == DCB_OUTPUT_EOL)
@@ -500,7 +500,7 @@ static void vop_line_flag_irq_disable(struct vop *vop)
 static int vop_enable(struct drm_crtc *crtc)
 {
 	struct vop *vop = to_vop(crtc);
-	int ret;
+	int ret, i;
 
 	ret = pm_runtime_get_sync(vop->dev);
 	if (ret < 0) {

@@ -533,6 +533,20 @@ static int vop_enable(struct drm_crtc *crtc)
 	}
 
 	memcpy(vop->regs, vop->regsbak, vop->len);
+	/*
+	 * We need to make sure that all windows are disabled before we
+	 * enable the crtc. Otherwise we might try to scan from a destroyed
+	 * buffer later.
+	 */
+	for (i = 0; i < vop->data->win_size; i++) {
+		struct vop_win *vop_win = &vop->win[i];
+		const struct vop_win_data *win = vop_win->data;
+
+		spin_lock(&vop->reg_lock);
+		VOP_WIN_SET(vop, win, enable, 0);
+		spin_unlock(&vop->reg_lock);
+	}
+
 	vop_cfg_done(vop);
 
 	/*

@@ -566,28 +580,11 @@ err_put_pm_runtime:
 static void vop_crtc_disable(struct drm_crtc *crtc)
 {
 	struct vop *vop = to_vop(crtc);
-	int i;
 
 	WARN_ON(vop->event);
 
 	rockchip_drm_psr_deactivate(&vop->crtc);
 
-	/*
-	 * We need to make sure that all windows are disabled before we
-	 * disable that crtc. Otherwise we might try to scan from a destroyed
-	 * buffer later.
-	 */
-	for (i = 0; i < vop->data->win_size; i++) {
-		struct vop_win *vop_win = &vop->win[i];
-		const struct vop_win_data *win = vop_win->data;
-
-		spin_lock(&vop->reg_lock);
-		VOP_WIN_SET(vop, win, enable, 0);
-		spin_unlock(&vop->reg_lock);
-	}
-
-	vop_cfg_done(vop);
-
 	drm_crtc_vblank_off(crtc);
 
 	/*

@@ -682,8 +679,10 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
 	 * Src.x1 can be odd when do clip, but yuv plane start point
 	 * need align with 2 pixel.
 	 */
-	if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2))
+	if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2)) {
+		DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n");
 		return -EINVAL;
+	}
 
 	return 0;
 }

@@ -764,7 +763,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
 	spin_lock(&vop->reg_lock);
 
 	VOP_WIN_SET(vop, win, format, format);
-	VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2);
+	VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4));
 	VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
 	if (is_yuv_support(fb->format->format)) {
 		int hsub = drm_format_horz_chroma_subsampling(fb->format->format);

@@ -778,7 +777,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
 		offset += (src->y1 >> 16) * fb->pitches[1] / vsub;
 
 		dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];
-		VOP_WIN_SET(vop, win, uv_vir, fb->pitches[1] >> 2);
+		VOP_WIN_SET(vop, win, uv_vir, DIV_ROUND_UP(fb->pitches[1], 4));
 		VOP_WIN_SET(vop, win, uv_mst, dma_addr);
 	}
@@ -282,6 +282,9 @@ static inline uint16_t scl_get_bili_dn_vskip(int src_h, int dst_h,
 
 	act_height = (src_h + vskiplines - 1) / vskiplines;
 
+	if (act_height == dst_h)
+		return GET_SCL_FT_BILI_DN(src_h, dst_h) / vskiplines;
+
 	return GET_SCL_FT_BILI_DN(act_height, dst_h);
 }
@@ -7,7 +7,6 @@ config DRM_STM
 	select DRM_PANEL
 	select VIDEOMODE_HELPERS
 	select FB_PROVIDE_GET_FB_UNMAPPED_AREA
-	default y
 
 	help
 	  Enable support for the on-chip display controller on
@@ -983,7 +983,7 @@ config I2C_UNIPHIER_F
 
 config I2C_VERSATILE
 	tristate "ARM Versatile/Realview I2C bus support"
-	depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST
+	depends on ARCH_MPS2 || ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST
 	select I2C_ALGOBIT
 	help
 	  Say yes if you want to support the I2C serial bus on ARMs Versatile
@@ -298,6 +298,9 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
	}

	acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev);
	/* Some broken DSTDs use 1MiHz instead of 1MHz */
	if (acpi_speed == 1048576)
		acpi_speed = 1000000;
	/*
	 * Find bus speed from the "clock-frequency" device property, ACPI
	 * or by using fast mode if neither is set.
@@ -319,7 +322,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
	if (dev->clk_freq != 100000 && dev->clk_freq != 400000
	    && dev->clk_freq != 1000000 && dev->clk_freq != 3400000) {
		dev_err(&pdev->dev,
			"Only 100kHz, 400kHz, 1MHz and 3.4MHz supported");
			"%d Hz is unsupported, only 100kHz, 400kHz, 1MHz and 3.4MHz are supported\n",
			dev->clk_freq);
		ret = -EINVAL;
		goto exit_reset;
	}
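The first designware hunk papers over firmware that reports the Fast-mode-plus speed as 2^20 ("1 MiHz") rather than 10^6 Hz; 1048576 is exactly 1 << 20. A hedged sketch of the same normalization in isolation (the helper name is invented for illustration):

	#include <stdio.h>

	/* Hypothetical helper: fold a binary-prefixed "mega" speed reported by
	 * broken firmware back onto the decimal bus speed the core expects. */
	static unsigned int normalize_bus_speed(unsigned int hz)
	{
		if (hz == (1u << 20))	/* 1048576 "1MiHz" -> 1000000 Hz */
			return 1000000;
		return hz;
	}

	int main(void)
	{
		printf("%u\n", normalize_bus_speed(1048576));	/* 1000000 */
		printf("%u\n", normalize_bus_speed(400000));	/* unchanged */
		return 0;
	}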
@@ -230,6 +230,16 @@ void i2c_acpi_register_devices(struct i2c_adapter *adap)
		dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
}

const struct acpi_device_id *
i2c_acpi_match_device(const struct acpi_device_id *matches,
		      struct i2c_client *client)
{
	if (!(client && matches))
		return NULL;

	return acpi_match_device(matches, &client->dev);
}

static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
					 void *data, void **return_value)
{
@@ -289,7 +299,7 @@ u32 i2c_acpi_find_bus_speed(struct device *dev)
}
EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed);

static int i2c_acpi_match_adapter(struct device *dev, void *data)
static int i2c_acpi_find_match_adapter(struct device *dev, void *data)
{
	struct i2c_adapter *adapter = i2c_verify_adapter(dev);
@@ -299,7 +309,7 @@ static int i2c_acpi_match_adapter(struct device *dev, void *data)
	return ACPI_HANDLE(dev) == (acpi_handle)data;
}

static int i2c_acpi_match_device(struct device *dev, void *data)
static int i2c_acpi_find_match_device(struct device *dev, void *data)
{
	return ACPI_COMPANION(dev) == data;
}
@@ -309,7 +319,7 @@ static struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle)
	struct device *dev;

	dev = bus_find_device(&i2c_bus_type, NULL, handle,
			      i2c_acpi_match_adapter);
			      i2c_acpi_find_match_adapter);
	return dev ? i2c_verify_adapter(dev) : NULL;
}
@@ -317,7 +327,8 @@ static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = bus_find_device(&i2c_bus_type, NULL, adev, i2c_acpi_match_device);
	dev = bus_find_device(&i2c_bus_type, NULL, adev,
			      i2c_acpi_find_match_device);
	return dev ? i2c_verify_client(dev) : NULL;
}
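The two renames above free the i2c_acpi_match_device name for the new NULL-safe ID-table matcher added in the first hunk, while the find_-prefixed helpers remain per-device predicates for bus_find_device(). A toy model of that predicate-based lookup, with stand-in types that are not the kernel's:

	#include <stdio.h>

	/* Toy bus_find_device(): walk a device list, return the first entry
	 * the match callback accepts. All names here are illustrative. */
	struct device { const char *name; void *acpi_handle; };

	static struct device devices[] = {
		{ "i2c-0", (void *)0x1000 },
		{ "i2c-1", (void *)0x2000 },
	};

	static int find_match_adapter(struct device *dev, void *data)
	{
		/* Mirrors i2c_acpi_find_match_adapter(): compare the ACPI handle. */
		return dev->acpi_handle == data;
	}

	static struct device *find_device(void *data,
					  int (*match)(struct device *, void *))
	{
		for (size_t i = 0; i < sizeof(devices) / sizeof(devices[0]); i++)
			if (match(&devices[i], data))
				return &devices[i];
		return NULL;
	}

	int main(void)
	{
		struct device *dev = find_device((void *)0x2000, find_match_adapter);

		printf("%s\n", dev ? dev->name : "not found");
		return 0;
	}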
@@ -357,6 +357,7 @@ static int i2c_device_probe(struct device *dev)
	 * Tree match table entry is supplied for the probing device.
	 */
	if (!driver->id_table &&
	    !i2c_acpi_match_device(dev->driver->acpi_match_table, client) &&
	    !i2c_of_match_device(dev->driver->of_match_table, client))
		return -ENODEV;
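With the added i2c_acpi_match_device() check, an I2C driver can bind with only an ACPI match table and no legacy id_table. A hedged sketch of what such a driver skeleton might look like (the HID, names, and driver are invented for illustration, not from this series):

	// SPDX-License-Identifier: GPL-2.0
	/* Hypothetical ACPI-only I2C driver; "ABCD0001" is an invented HID. */
	#include <linux/acpi.h>
	#include <linux/i2c.h>
	#include <linux/module.h>

	static int foo_probe(struct i2c_client *client)
	{
		dev_info(&client->dev, "bound via ACPI match table only\n");
		return 0;
	}

	static const struct acpi_device_id foo_acpi_ids[] = {
		{ "ABCD0001", 0 },
		{ }
	};
	MODULE_DEVICE_TABLE(acpi, foo_acpi_ids);

	static struct i2c_driver foo_driver = {
		.driver = {
			.name = "foo",
			.acpi_match_table = foo_acpi_ids,
		},
		.probe_new = foo_probe,	/* note: no .id_table */
	};
	module_i2c_driver(foo_driver);

	MODULE_LICENSE("GPL");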
@@ -31,9 +31,18 @@ int i2c_check_addr_validity(unsigned addr, unsigned short flags);
int i2c_check_7bit_addr_validity_strict(unsigned short addr);

#ifdef CONFIG_ACPI
const struct acpi_device_id *
i2c_acpi_match_device(const struct acpi_device_id *matches,
		      struct i2c_client *client);
void i2c_acpi_register_devices(struct i2c_adapter *adap);
#else /* CONFIG_ACPI */
static inline void i2c_acpi_register_devices(struct i2c_adapter *adap) { }
static inline const struct acpi_device_id *
i2c_acpi_match_device(const struct acpi_device_id *matches,
		      struct i2c_client *client)
{
	return NULL;
}
#endif /* CONFIG_ACPI */
extern struct notifier_block i2c_acpi_notifier;
@@ -83,7 +83,7 @@ config I2C_MUX_PINCTRL
	  different sets of pins at run-time.

	  This driver can also be built as a module. If so, the module will be
	  called pinctrl-i2cmux.
	  called i2c-mux-pinctrl.

config I2C_MUX_REG
	tristate "Register-based I2C multiplexer"
@@ -193,7 +193,6 @@ struct bmc150_accel_data {
	struct regmap *regmap;
	int irq;
	struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS];
	atomic_t active_intr;
	struct bmc150_accel_trigger triggers[BMC150_ACCEL_TRIGGERS];
	struct mutex mutex;
	u8 fifo_mode, watermark;
@@ -493,11 +492,6 @@ static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i,
			goto out_fix_power_state;
	}

	if (state)
		atomic_inc(&data->active_intr);
	else
		atomic_dec(&data->active_intr);

	return 0;

out_fix_power_state:
@@ -1710,8 +1704,7 @@ static int bmc150_accel_resume(struct device *dev)
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	mutex_lock(&data->mutex);
	if (atomic_read(&data->active_intr))
		bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
	bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
	bmc150_accel_fifo_set_mode(data);
	mutex_unlock(&data->mutex);
@@ -166,6 +166,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
			.mask_ihl = 0x02,
			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
		},
		.sim = {
			.addr = 0x23,
			.value = BIT(0),
		},
		.multi_read_bit = true,
		.bootime = 2,
	},
@@ -234,6 +238,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
			.mask_od = 0x40,
			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
		},
		.sim = {
			.addr = 0x23,
			.value = BIT(0),
		},
		.multi_read_bit = true,
		.bootime = 2,
	},
@@ -316,6 +324,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
				.en_mask = 0x08,
			},
		},
		.sim = {
			.addr = 0x24,
			.value = BIT(0),
		},
		.multi_read_bit = false,
		.bootime = 2,
	},
@@ -379,6 +391,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
			.mask_int1 = 0x04,
			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
		},
		.sim = {
			.addr = 0x21,
			.value = BIT(1),
		},
		.multi_read_bit = true,
		.bootime = 2, /* guess */
	},
@@ -437,6 +453,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
			.mask_od = 0x40,
			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
		},
		.sim = {
			.addr = 0x21,
			.value = BIT(7),
		},
		.multi_read_bit = false,
		.bootime = 2, /* guess */
	},
@@ -499,6 +519,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
			.addr_ihl = 0x22,
			.mask_ihl = 0x80,
		},
		.sim = {
			.addr = 0x23,
			.value = BIT(0),
		},
		.multi_read_bit = true,
		.bootime = 2,
	},
@@ -547,6 +571,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
			.mask_int1 = 0x04,
			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
		},
		.sim = {
			.addr = 0x21,
			.value = BIT(1),
		},
		.multi_read_bit = false,
		.bootime = 2,
	},
@@ -614,6 +642,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
			.mask_ihl = 0x02,
			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
		},
		.sim = {
			.addr = 0x23,
			.value = BIT(0),
		},
		.multi_read_bit = true,
		.bootime = 2,
	},
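The repeated .sim = { .addr, .value } blocks appear to record, per sensor, which register and bit carry the chip's serial-interface-mode (SIM) setting so that common code can apply it uniformly; that reading is inferred from the field name and values, not stated in this diff. A toy illustration of applying such a pair against a fake register file (all names invented):

	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors the shape of the .sim = { .addr, .value } pairs added above. */
	struct sensor_sim {
		uint8_t addr;	/* register holding the interface-mode bit */
		uint8_t value;	/* bit to set in that register */
	};

	static uint8_t regs[256];	/* toy register file standing in for the chip */

	static void apply_sim(const struct sensor_sim *sim)
	{
		/* OR it in so other bits in the register are preserved. */
		regs[sim->addr] |= sim->value;
	}

	int main(void)
	{
		struct sensor_sim sim = { .addr = 0x23, .value = 1 << 0 };

		apply_sim(&sim);
		printf("reg 0x%02x = 0x%02x\n", sim.addr, regs[sim.addr]);
		return 0;
	}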
@@ -22,6 +22,7 @@

#include <linux/iio/iio.h>
#include <linux/iio/driver.h>
#include <linux/iopoll.h>

#define ASPEED_RESOLUTION_BITS		10
#define ASPEED_CLOCKS_PER_SAMPLE	12
@@ -38,11 +39,17 @@

#define ASPEED_ENGINE_ENABLE		BIT(0)

#define ASPEED_ADC_CTRL_INIT_RDY	BIT(8)

#define ASPEED_ADC_INIT_POLLING_TIME	500
#define ASPEED_ADC_INIT_TIMEOUT		500000

struct aspeed_adc_model_data {
	const char *model_name;
	unsigned int min_sampling_rate;	// Hz
	unsigned int max_sampling_rate;	// Hz
	unsigned int vref_voltage;	// mV
	bool wait_init_sequence;
};

struct aspeed_adc_data {
@@ -211,6 +218,24 @@ static int aspeed_adc_probe(struct platform_device *pdev)
		goto scaler_error;
	}

	model_data = of_device_get_match_data(&pdev->dev);

	if (model_data->wait_init_sequence) {
		/* Enable engine in normal mode. */
		writel(ASPEED_OPERATION_MODE_NORMAL | ASPEED_ENGINE_ENABLE,
		       data->base + ASPEED_REG_ENGINE_CONTROL);

		/* Wait for initial sequence complete. */
		ret = readl_poll_timeout(data->base + ASPEED_REG_ENGINE_CONTROL,
					 adc_engine_control_reg_val,
					 adc_engine_control_reg_val &
					 ASPEED_ADC_CTRL_INIT_RDY,
					 ASPEED_ADC_INIT_POLLING_TIME,
					 ASPEED_ADC_INIT_TIMEOUT);
		if (ret)
			goto scaler_error;
	}

	/* Start all channels in normal mode. */
	ret = clk_prepare_enable(data->clk_scaler->clk);
	if (ret)
@@ -274,6 +299,7 @@ static const struct aspeed_adc_model_data ast2500_model_data = {
	.vref_voltage = 1800, // mV
	.min_sampling_rate = 1,
	.max_sampling_rate = 1000000,
	.wait_init_sequence = true,
};

static const struct of_device_id aspeed_adc_matches[] = {
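The probe hunk waits for the ADC's init sequence with readl_poll_timeout() from <linux/iopoll.h>, polling every 500 us with a 500000 us ceiling; the macro re-reads the register into its val argument until the condition holds, returning 0 on success or -ETIMEDOUT. A minimal sketch of the same call pattern against an invented register and bit:

	#include <linux/bitops.h>
	#include <linux/io.h>
	#include <linux/iopoll.h>
	#include <linux/types.h>

	#define MY_REG		0x00		/* invented register offset */
	#define MY_READY	BIT(8)		/* invented ready bit */

	/* Hedged sketch: spin on an MMIO register until the ready bit appears. */
	static int wait_for_ready(void __iomem *base)
	{
		u32 val;

		return readl_poll_timeout(base + MY_REG, val,
					  val & MY_READY,
					  500,		/* poll interval, us */
					  500000);	/* total timeout, us */
	}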
@@ -28,6 +28,8 @@
#include <linux/iio/driver.h>

#define AXP288_ADC_EN_MASK		0xF1
#define AXP288_ADC_TS_PIN_GPADC		0xF2
#define AXP288_ADC_TS_PIN_ON		0xF3

enum axp288_adc_id {
	AXP288_ADC_TS,
@@ -121,6 +123,26 @@ static int axp288_adc_read_channel(int *val, unsigned long address,
	return IIO_VAL_INT;
}

static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode,
			     unsigned long address)
{
	int ret;

	/* channels other than GPADC do not need to switch TS pin */
	if (address != AXP288_GP_ADC_H)
		return 0;

	ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode);
	if (ret)
		return ret;

	/* When switching to the GPADC pin give things some time to settle */
	if (mode == AXP288_ADC_TS_PIN_GPADC)
		usleep_range(6000, 10000);

	return 0;
}

static int axp288_adc_read_raw(struct iio_dev *indio_dev,
			       struct iio_chan_spec const *chan,
			       int *val, int *val2, long mask)
@@ -131,7 +153,16 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
	mutex_lock(&indio_dev->mlock);
	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC,
				      chan->address)) {
			dev_err(&indio_dev->dev, "GPADC mode\n");
			ret = -EINVAL;
			break;
		}
		ret = axp288_adc_read_channel(val, chan->address, info->regmap);
		if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON,
				      chan->address))
			dev_err(&indio_dev->dev, "TS pin restore\n");
		break;
	default:
		ret = -EINVAL;
@@ -141,6 +172,15 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
	return ret;
}

static int axp288_adc_set_state(struct regmap *regmap)
{
	/* ADC should be always enabled for internal FG to function */
	if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON))
		return -EIO;

	return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
}

static const struct iio_info axp288_adc_iio_info = {
	.read_raw = &axp288_adc_read_raw,
	.driver_module = THIS_MODULE,
@@ -169,7 +209,7 @@ static int axp288_adc_probe(struct platform_device *pdev)
	 * Set ADC to enabled state at all time, including system suspend.
	 * otherwise internal fuel gauge functionality may be affected.
	 */
	ret = regmap_write(info->regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
	ret = axp288_adc_set_state(axp20x->regmap);
	if (ret) {
		dev_err(&pdev->dev, "unable to enable ADC device\n");
		return ret;
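The axp288 hunks bracket each GPADC read: route the TS pin to the GPADC, sample, then hand the pin back so the fuel gauge keeps its thermistor input; the settle delay only applies on the way in. A hedged distillation of that sequence (the helpers are the ones added in the diff, but this composition is simplified and not the driver's literal code path):

	/* Hedged sketch of the bracketed read implemented in read_raw() above. */
	static int gpadc_bracketed_read(struct axp288_adc_info *info,
					struct iio_chan_spec const *chan, int *val)
	{
		int ret;

		/* Route the TS pin to the GPADC before sampling (settles ~6-10 ms). */
		ret = axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC,
					chan->address);
		if (ret)
			return ret;

		ret = axp288_adc_read_channel(val, chan->address, info->regmap);

		/* Always return the pin to the fuel gauge, even if the read failed. */
		axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON, chan->address);
		return ret;
	}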
@@ -256,6 +256,7 @@ static int sun4i_gpadc_read(struct iio_dev *indio_dev, int channel, int *val,

err:
	pm_runtime_put_autosuspend(indio_dev->dev.parent);
	disable_irq(irq);
	mutex_unlock(&info->mutex);

	return ret;
@@ -365,7 +366,6 @@ static irqreturn_t sun4i_gpadc_temp_data_irq_handler(int irq, void *dev_id)
	complete(&info->completion);

out:
	disable_irq_nosync(info->temp_data_irq);
	return IRQ_HANDLED;
}
@@ -380,7 +380,6 @@ static irqreturn_t sun4i_gpadc_fifo_data_irq_handler(int irq, void *dev_id)
	complete(&info->completion);

out:
	disable_irq_nosync(info->fifo_data_irq);
	return IRQ_HANDLED;
}
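The three sun4i-gpadc hunks rebalance the one-shot interrupt: instead of each handler disabling itself with disable_irq_nosync(), the read path now disarms the IRQ at its single exit point, so every enable has exactly one matching disable. A hedged sketch of that pattern in isolation (all names are illustrative, not the driver's):

	#include <linux/completion.h>
	#include <linux/errno.h>
	#include <linux/interrupt.h>
	#include <linux/jiffies.h>
	#include <linux/types.h>

	struct my_adc {
		struct completion done;
		u32 last_sample;
	};

	static int one_shot_read(struct my_adc *adc, int irq, u32 *val)
	{
		int ret = 0;

		reinit_completion(&adc->done);
		enable_irq(irq);	/* arm for exactly one conversion */

		if (!wait_for_completion_timeout(&adc->done,
						 msecs_to_jiffies(100))) {
			ret = -ETIMEDOUT;
			goto out;
		}
		*val = adc->last_sample;	/* written by the IRQ handler */
	out:
		disable_irq(irq);	/* single, unconditional disarm point */
		return ret;
	}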
@@ -77,7 +77,7 @@
#define VF610_ADC_ADSTS_MASK		0x300
#define VF610_ADC_ADLPC_EN		0x80
#define VF610_ADC_ADHSC_EN		0x400
#define VF610_ADC_REFSEL_VALT		0x100
#define VF610_ADC_REFSEL_VALT		0x800
#define VF610_ADC_REFSEL_VBG		0x1000
#define VF610_ADC_ADTRG_HARD		0x2000
#define VF610_ADC_AVGS_8		0x4000
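The VALT fix is consistent with the neighbouring bit layout: ADSTS occupies mask 0x300, VBG is 0x1000 and ADTRG is 0x2000, which places the reference-select field at bits 11-12 and makes the alternate reference 1 << 11 = 0x800 rather than 0x100. Read as a field (my inference from the surrounding defines, not from the VF610 reference manual):

	#define VF610_ADC_REFSEL_SHIFT	11				/* inferred field position */
	#define VF610_ADC_REFSEL_VALT	(0x1 << VF610_ADC_REFSEL_SHIFT)	/* 0x0800 */
	#define VF610_ADC_REFSEL_VBG	(0x2 << VF610_ADC_REFSEL_SHIFT)	/* 0x1000 */
	/* The old 0x100 fell inside the ADSTS field (mask 0x300), so it could
	 * never actually select the alternate voltage reference. */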
Some files were not shown because too many files have changed in this diff.