Merge branch 'fixes' into next
Merge our fixes branch; a few of the fixes are tripping people up while working on top of next, and we also have a dependency between the CXL fixes and new CXL code we want to merge into next.
commit 218ea31039
@@ -866,6 +866,15 @@
 	dscc4.setup=	[NET]
 
+	dt_cpu_ftrs=	[PPC]
+			Format: {"off" | "known"}
+			Control how the dt_cpu_ftrs device-tree binding is
+			used for CPU feature discovery and setup (if it
+			exists).
+			off: Do not use it, fall back to legacy cpu table.
+			known: Do not pass through unknown features to guests
+			or userspace, only those that the kernel is aware of.
+
 	dump_apple_properties	[X86]
 			Dump name and content of EFI device properties on
 			x86 Macs. Useful for driver authors to determine
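As an aside: the new option is passed on the kernel command line like any other boot parameter. A hypothetical bootloader entry (the paths here are made up) might read:

    vmlinux root=/dev/sda2 dt_cpu_ftrs=known
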
@@ -374,22 +374,6 @@ source "arch/powerpc/platforms/Kconfig"
 
 menu "Kernel options"
 
-config PPC_DT_CPU_FTRS
-	bool "Device-tree based CPU feature discovery & setup"
-	depends on PPC_BOOK3S_64
-	default n
-	help
-	  This enables code to use a new device tree binding for describing CPU
-	  compatibility and features. Saying Y here will attempt to use the new
-	  binding if the firmware provides it. Currently only the skiboot
-	  firmware provides this binding.
-	  If you're not sure say Y.
-
-config PPC_CPUFEATURES_ENABLE_UNKNOWN
-	bool "cpufeatures pass through unknown features to guest/userspace"
-	depends on PPC_DT_CPU_FTRS
-	default y
-
 config HIGHMEM
 	bool "High memory support"
 	depends on PPC32
@@ -8,7 +8,7 @@
 #define H_PTE_INDEX_SIZE  9
 #define H_PMD_INDEX_SIZE  7
 #define H_PUD_INDEX_SIZE  9
-#define H_PGD_INDEX_SIZE  12
+#define H_PGD_INDEX_SIZE  9
 
 #ifndef __ASSEMBLY__
 #define H_PTE_TABLE_SIZE	(sizeof(pte_t) << H_PTE_INDEX_SIZE)
@@ -104,7 +104,7 @@
 	"1:	"PPC_TLNEI"	%4,0\n"			\
 	_EMIT_BUG_ENTRY					\
 	: : "i" (__FILE__), "i" (__LINE__),		\
-	  "i" (BUGFLAG_TAINT(TAINT_WARN)),		\
+	  "i" (BUGFLAG_WARNING|BUGFLAG_TAINT(TAINT_WARN)),\
 	  "i" (sizeof(struct bug_entry)),		\
 	  "r" (__ret_warn_on));				\
 }							\
@@ -214,7 +214,6 @@ enum {
 #define CPU_FTR_DAWR			LONG_ASM_CONST(0x0400000000000000)
 #define CPU_FTR_DABRX			LONG_ASM_CONST(0x0800000000000000)
 #define CPU_FTR_PMAO_BUG		LONG_ASM_CONST(0x1000000000000000)
-#define CPU_FTR_SUBCORE			LONG_ASM_CONST(0x2000000000000000)
 #define CPU_FTR_POWER9_DD1		LONG_ASM_CONST(0x4000000000000000)
 
 #ifndef __ASSEMBLY__
@@ -463,7 +462,7 @@ enum {
 	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
 	    CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
 	    CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
-	    CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_SUBCORE)
+	    CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
 #define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
 #define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
 #define CPU_FTRS_POWER9 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
@@ -103,6 +103,7 @@ extern int kprobe_exceptions_notify(struct notifier_block *self,
 extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 extern int kprobe_handler(struct pt_regs *regs);
 extern int kprobe_post_handler(struct pt_regs *regs);
+extern int is_current_kprobe_addr(unsigned long addr);
 #ifdef CONFIG_KPROBES_ON_FTRACE
 extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
 			   struct kprobe_ctlblk *kcb);
@@ -110,13 +110,18 @@ void release_thread(struct task_struct *);
 #define TASK_SIZE_128TB (0x0000800000000000UL)
 #define TASK_SIZE_512TB (0x0002000000000000UL)
 
-#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * For now 512TB is only supported with book3s and 64K linux page size.
+ */
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_64K_PAGES)
 /*
  * Max value currently used:
  */
 #define TASK_SIZE_USER64		TASK_SIZE_512TB
+#define DEFAULT_MAP_WINDOW_USER64	TASK_SIZE_128TB
 #else
 #define TASK_SIZE_USER64		TASK_SIZE_64TB
+#define DEFAULT_MAP_WINDOW_USER64	TASK_SIZE_64TB
 #endif
 
 /*
@@ -132,7 +137,7 @@ void release_thread(struct task_struct *);
  * space during mmap's.
  */
 #define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
-#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_128TB / 4))
+#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(DEFAULT_MAP_WINDOW_USER64 / 4))
 
 #define TASK_UNMAPPED_BASE ((is_32bit_task()) ? \
 		TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
@@ -144,20 +149,14 @@ void release_thread(struct task_struct *);
  */
 #ifdef CONFIG_PPC_BOOK3S_64
 #define DEFAULT_MAP_WINDOW	((is_32bit_task()) ? \
-				 TASK_SIZE_USER32 : TASK_SIZE_128TB)
+				 TASK_SIZE_USER32 : DEFAULT_MAP_WINDOW_USER64)
 #else
 #define DEFAULT_MAP_WINDOW	TASK_SIZE
 #endif
 
 #ifdef __powerpc64__
-
-#ifdef CONFIG_PPC_BOOK3S_64
 /* Limit stack to 128TB */
-#define STACK_TOP_USER64 TASK_SIZE_128TB
-#else
-#define STACK_TOP_USER64 TASK_SIZE_USER64
-#endif
-
+#define STACK_TOP_USER64 DEFAULT_MAP_WINDOW_USER64
 #define STACK_TOP_USER32 TASK_SIZE_USER32
 
 #define STACK_TOP (is_32bit_task() ? \
@@ -44,8 +44,22 @@ extern void __init dump_numa_cpu_topology(void);
 extern int sysfs_add_device_to_node(struct device *dev, int nid);
 extern void sysfs_remove_device_from_node(struct device *dev, int nid);
 
+static inline int early_cpu_to_node(int cpu)
+{
+	int nid;
+
+	nid = numa_cpu_lookup_table[cpu];
+
+	/*
+	 * Fall back to node 0 if nid is unset (it should be, except bugs).
+	 * This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)).
+	 */
+	return (nid < 0) ? 0 : nid;
+}
 #else
 
+static inline int early_cpu_to_node(int cpu) { return 0; }
+
 static inline void dump_numa_cpu_topology(void) {}
 
 static inline int sysfs_add_device_to_node(struct device *dev, int nid)
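A minimal user-space sketch of the fallback behaviour added above; the kernel's numa_cpu_lookup_table and NODE_DATA() are mocked here, only the clamping logic is illustrative:

    #include <assert.h>

    /* Mock of the kernel's per-CPU NUMA lookup table; -1 means "not yet set". */
    static int numa_cpu_lookup_table[4] = { 0, 1, -1, -1 };

    /* Mirror of early_cpu_to_node(): unset entries map to node 0. */
    static int early_cpu_to_node(int cpu)
    {
            int nid = numa_cpu_lookup_table[cpu];
            return (nid < 0) ? 0 : nid;
    }

    int main(void)
    {
            assert(early_cpu_to_node(1) == 1); /* known mapping preserved */
            assert(early_cpu_to_node(2) == 0); /* unset entry falls back to 0 */
            return 0;
    }
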
@@ -267,13 +267,7 @@ do { \
 extern unsigned long __copy_tofrom_user(void __user *to,
 		const void __user *from, unsigned long size);
 
-#ifndef __powerpc64__
-
-#define INLINE_COPY_FROM_USER
-#define INLINE_COPY_TO_USER
-
-#else /* __powerpc64__ */
-
+#ifdef __powerpc64__
 static inline unsigned long
 raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
@@ -94,11 +94,13 @@ struct xive_q {
  * store at 0 and some ESBs support doing a trigger via a
  * separate trigger page.
  */
-#define XIVE_ESB_GET		0x800
-#define XIVE_ESB_SET_PQ_00	0xc00
-#define XIVE_ESB_SET_PQ_01	0xd00
-#define XIVE_ESB_SET_PQ_10	0xe00
-#define XIVE_ESB_SET_PQ_11	0xf00
+#define XIVE_ESB_STORE_EOI	0x400 /* Store */
+#define XIVE_ESB_LOAD_EOI	0x000 /* Load */
+#define XIVE_ESB_GET		0x800 /* Load */
+#define XIVE_ESB_SET_PQ_00	0xc00 /* Load */
+#define XIVE_ESB_SET_PQ_01	0xd00 /* Load */
+#define XIVE_ESB_SET_PQ_10	0xe00 /* Load */
+#define XIVE_ESB_SET_PQ_11	0xf00 /* Load */
 
 #define XIVE_ESB_VAL_P		0x2
 #define XIVE_ESB_VAL_Q		0x1
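For orientation, each special operation is just a load or store at a fixed offset into an interrupt's ESB management page. A standalone sketch (the base address is a made-up placeholder, not a real MMIO mapping; in the kernel these would be in_be64()/out_be64() accesses):

    #include <stdio.h>
    #include <stdint.h>

    #define XIVE_ESB_STORE_EOI  0x400 /* Store */
    #define XIVE_ESB_LOAD_EOI   0x000 /* Load */
    #define XIVE_ESB_SET_PQ_00  0xc00 /* Load */

    int main(void)
    {
            uintptr_t esb_page = 0x100000; /* placeholder for an ioremap'd ESB page */

            printf("store-EOI at 0x%lx\n", (unsigned long)(esb_page + XIVE_ESB_STORE_EOI));
            printf("load-EOI  at 0x%lx\n", (unsigned long)(esb_page + XIVE_ESB_LOAD_EOI));
            printf("set PQ=00 at 0x%lx\n", (unsigned long)(esb_page + XIVE_ESB_SET_PQ_00));
            return 0;
    }
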
@@ -8,6 +8,7 @@
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/jump_label.h>
+#include <linux/libfdt.h>
 #include <linux/memblock.h>
 #include <linux/printk.h>
 #include <linux/sched.h>
@@ -642,7 +643,6 @@ static struct dt_cpu_feature_match __initdata
 	{"processor-control-facility", feat_enable_dbell, CPU_FTR_DBELL},
 	{"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
 	{"processor-utilization-of-resources-register", feat_enable_purr, 0},
-	{"subcore", feat_enable, CPU_FTR_SUBCORE},
 	{"no-execute", feat_enable, 0},
 	{"strong-access-ordering", feat_enable, CPU_FTR_SAO},
 	{"cache-inhibited-large-page", feat_enable_large_ci, 0},
@@ -671,12 +671,24 @@
 	{"wait-v3", feat_enable, 0},
 };
 
-/* XXX: how to configure this? Default + boot time? */
-#ifdef CONFIG_PPC_CPUFEATURES_ENABLE_UNKNOWN
-#define CPU_FEATURE_ENABLE_UNKNOWN 1
-#else
-#define CPU_FEATURE_ENABLE_UNKNOWN 0
-#endif
+static bool __initdata using_dt_cpu_ftrs;
+static bool __initdata enable_unknown = true;
+
+static int __init dt_cpu_ftrs_parse(char *str)
+{
+	if (!str)
+		return 0;
+
+	if (!strcmp(str, "off"))
+		using_dt_cpu_ftrs = false;
+	else if (!strcmp(str, "known"))
+		enable_unknown = false;
+	else
+		return 1;
+
+	return 0;
+}
+early_param("dt_cpu_ftrs", dt_cpu_ftrs_parse);
 
 static void __init cpufeatures_setup_start(u32 isa)
 {
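The parser's semantics can be checked in isolation. A user-space harness around the same strcmp logic (not kernel code; the early_param() plumbing and the kernel's initial flag states are simplified here):

    #include <assert.h>
    #include <string.h>
    #include <stdbool.h>

    static bool using_dt_cpu_ftrs = true; /* simplified: assume the binding was found */
    static bool enable_unknown = true;

    static int dt_cpu_ftrs_parse(const char *str)
    {
            if (!str)
                    return 0;
            if (!strcmp(str, "off"))
                    using_dt_cpu_ftrs = false;
            else if (!strcmp(str, "known"))
                    enable_unknown = false;
            else
                    return 1; /* unrecognised value */
            return 0;
    }

    int main(void)
    {
            assert(dt_cpu_ftrs_parse("known") == 0 && !enable_unknown);
            assert(dt_cpu_ftrs_parse("off") == 0 && !using_dt_cpu_ftrs);
            assert(dt_cpu_ftrs_parse("bogus") == 1);
            return 0;
    }
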
@@ -707,7 +719,7 @@ static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
 		}
 	}
 
-	if (!known && CPU_FEATURE_ENABLE_UNKNOWN) {
+	if (!known && enable_unknown) {
 		if (!feat_try_enable_unknown(f)) {
 			pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
 				f->name);
@@ -756,6 +768,26 @@ static void __init cpufeatures_setup_finished(void)
 		cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features);
 }
 
+static int __init disabled_on_cmdline(void)
+{
+	unsigned long root, chosen;
+	const char *p;
+
+	root = of_get_flat_dt_root();
+	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
+	if (chosen == -FDT_ERR_NOTFOUND)
+		return false;
+
+	p = of_get_flat_dt_prop(chosen, "bootargs", NULL);
+	if (!p)
+		return false;
+
+	if (strstr(p, "dt_cpu_ftrs=off"))
+		return true;
+
+	return false;
+}
+
 static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
 					int depth, void *data)
 {
@@ -766,8 +798,6 @@ static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
 	return 0;
 }
 
-static bool __initdata using_dt_cpu_ftrs = false;
-
 bool __init dt_cpu_ftrs_in_use(void)
 {
 	return using_dt_cpu_ftrs;
@@ -775,6 +805,8 @@ bool __init dt_cpu_ftrs_in_use(void)
 
 bool __init dt_cpu_ftrs_init(void *fdt)
 {
+	using_dt_cpu_ftrs = false;
+
 	/* Setup and verify the FDT, if it fails we just bail */
 	if (!early_init_dt_verify(fdt))
 		return false;
@@ -782,6 +814,9 @@ bool __init dt_cpu_ftrs_init(void *fdt)
 	if (!of_scan_flat_dt(fdt_find_cpu_features, NULL))
 		return false;
 
+	if (disabled_on_cmdline())
+		return false;
+
 	cpufeatures_setup_cpu();
 
 	using_dt_cpu_ftrs = true;
@@ -1027,5 +1062,8 @@ static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
 
 void __init dt_cpu_ftrs_scan(void)
 {
+	if (!using_dt_cpu_ftrs)
+		return;
+
 	of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL);
 }
@@ -1452,10 +1452,8 @@ USE_TEXT_SECTION()
 	.balign	IFETCH_ALIGN_BYTES
 do_hash_page:
 #ifdef CONFIG_PPC_STD_MMU_64
-	andis.	r0,r4,0xa410		/* weird error? */
+	andis.	r0,r4,0xa450		/* weird error? */
 	bne-	handle_page_fault	/* if not, try to insert a HPTE */
-	andis.	r0,r4,DSISR_DABRMATCH@h
-	bne-	handle_dabr_fault
 	CURRENT_THREAD_INFO(r11, r1)
 	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
 	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
@@ -1479,11 +1477,16 @@ do_hash_page:
 
 	/* Error */
 	blt-	13f
+
+	/* Reload DSISR into r4 for the DABR check below */
+	ld	r4,_DSISR(r1)
 #endif /* CONFIG_PPC_STD_MMU_64 */
 
 /* Here we have a page fault that hash_page can't handle. */
 handle_page_fault:
-11:	ld	r4,_DAR(r1)
+11:	andis.	r0,r4,DSISR_DABRMATCH@h
+	bne-	handle_dabr_fault
+	ld	r4,_DAR(r1)
 	ld	r5,_DSISR(r1)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	do_page_fault
@@ -43,6 +43,12 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
 struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
 
+int is_current_kprobe_addr(unsigned long addr)
+{
+	struct kprobe *p = kprobe_running();
+	return (p && (unsigned long)p->addr == addr) ? 1 : 0;
+}
+
 bool arch_within_kprobe_blacklist(unsigned long addr)
 {
 	return (addr >= (unsigned long)__kprobes_text_start &&
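What the new helper answers, with kprobe_running() stubbed out in a runnable sketch: it is simply "does addr match the currently-hit probe's address?" (struct kprobe is reduced to the one field used):

    #include <assert.h>
    #include <stddef.h>

    struct kprobe { void *addr; };

    /* Stand-in for the kernel's per-CPU "current kprobe" lookup. */
    static struct kprobe *current_kprobe;
    static struct kprobe *kprobe_running(void) { return current_kprobe; }

    static int is_current_kprobe_addr(unsigned long addr)
    {
            struct kprobe *p = kprobe_running();
            return (p && (unsigned long)p->addr == addr) ? 1 : 0;
    }

    int main(void)
    {
            struct kprobe kp = { .addr = (void *)0x1000 };

            assert(is_current_kprobe_addr(0x1000) == 0); /* no probe active */
            current_kprobe = &kp;
            assert(is_current_kprobe_addr(0x1000) == 1);
            assert(is_current_kprobe_addr(0x2000) == 0);
            return 0;
    }
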
@@ -617,6 +623,15 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
 #endif
 
+	/*
+	 * jprobes use jprobe_return() which skips the normal return
+	 * path of the function, and this messes up the accounting of the
+	 * function graph tracer.
+	 *
+	 * Pause function graph tracing while performing the jprobe function.
+	 */
+	pause_graph_tracing();
+
 	return 1;
 }
 NOKPROBE_SYMBOL(setjmp_pre_handler);
@@ -642,6 +657,8 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	 * saved regs...
 	 */
 	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
+	/* It's OK to start function graph tracing again */
+	unpause_graph_tracing();
 	preempt_enable_no_resched();
 	return 1;
 }
@@ -1697,6 +1697,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 #ifdef CONFIG_VSX
 	current->thread.used_vsr = 0;
 #endif
+	current->thread.load_fp = 0;
 	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
 	current->thread.fp_save_area = NULL;
 #ifdef CONFIG_ALTIVEC
@@ -1705,6 +1706,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 	current->thread.vr_save_area = NULL;
 	current->thread.vrsave = 0;
 	current->thread.used_vr = 0;
+	current->thread.load_vec = 0;
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_SPE
 	memset(current->thread.evr, 0, sizeof(current->thread.evr));
@@ -1716,6 +1718,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 	current->thread.tm_tfhar = 0;
 	current->thread.tm_texasr = 0;
 	current->thread.tm_tfiar = 0;
+	current->thread.load_tm = 0;
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 }
 EXPORT_SYMBOL(start_thread);
@@ -932,7 +932,7 @@ void __init setup_arch(char **cmdline_p)
 
 #ifdef CONFIG_PPC_MM_SLICES
 #ifdef CONFIG_PPC64
-	init_mm.context.addr_limit = TASK_SIZE_128TB;
+	init_mm.context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
 #else
 #error	"context.addr_limit not initialized."
 #endif
@@ -615,6 +615,24 @@ void __init exc_lvl_early_init(void)
 }
 #endif
 
+/*
+ * Emergency stacks are used for a range of things, from asynchronous
+ * NMIs (system reset, machine check) to synchronous, process context.
+ * We set preempt_count to zero, even though that isn't necessarily correct. To
+ * get the right value we'd need to copy it from the previous thread_info, but
+ * doing that might fault causing more problems.
+ * TODO: what to do with accounting?
+ */
+static void emerg_stack_init_thread_info(struct thread_info *ti, int cpu)
+{
+	ti->task = NULL;
+	ti->cpu = cpu;
+	ti->preempt_count = 0;
+	ti->local_flags = 0;
+	ti->flags = 0;
+	klp_init_thread_info(ti);
+}
+
 /*
  * Stack space used when we detect a bad kernel stack pointer, and
  * early in SMP boots before relocation is enabled. Exclusive emergency
@@ -633,24 +651,31 @@ void __init emergency_stack_init(void)
 	 * Since we use these as temporary stacks during secondary CPU
 	 * bringup, we need to get at them in real mode. This means they
 	 * must also be within the RMO region.
+	 *
+	 * The IRQ stacks allocated elsewhere in this file are zeroed and
+	 * initialized in kernel/irq.c. These are initialized here in order
+	 * to have emergency stacks available as early as possible.
 	 */
 	limit = min(safe_stack_limit(), ppc64_rma_size);
 
 	for_each_possible_cpu(i) {
 		struct thread_info *ti;
 		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-		klp_init_thread_info(ti);
+		memset(ti, 0, THREAD_SIZE);
+		emerg_stack_init_thread_info(ti, i);
 		paca[i].emergency_sp = (void *)ti + THREAD_SIZE;
 
 #ifdef CONFIG_PPC_BOOK3S_64
 		/* emergency stack for NMI exception handling. */
 		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-		klp_init_thread_info(ti);
+		memset(ti, 0, THREAD_SIZE);
+		emerg_stack_init_thread_info(ti, i);
 		paca[i].nmi_emergency_sp = (void *)ti + THREAD_SIZE;
 
 		/* emergency stack for machine check exception handling. */
 		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-		klp_init_thread_info(ti);
+		memset(ti, 0, THREAD_SIZE);
+		emerg_stack_init_thread_info(ti, i);
 		paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE;
 #endif
 	}
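The repeated pattern above (zero the whole stack allocation, then initialize the thread_info at its base) in a compact user-space sketch; THREAD_SIZE and struct thread_info are simplified stand-ins, not the kernel definitions:

    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>

    #define THREAD_SIZE 16384 /* stand-in for the kernel's stack size */

    struct thread_info { void *task; int cpu; int preempt_count; };

    static void emerg_stack_init_thread_info(struct thread_info *ti, int cpu)
    {
            ti->task = NULL;
            ti->cpu = cpu;
            ti->preempt_count = 0;
    }

    int main(void)
    {
            /* thread_info lives at the base of the stack allocation */
            struct thread_info *ti = malloc(THREAD_SIZE);

            memset(ti, 0, THREAD_SIZE);        /* previously only klp_init_thread_info() */
            emerg_stack_init_thread_info(ti, 3);

            char *emergency_sp = (char *)ti + THREAD_SIZE; /* stack grows down */
            assert(ti->cpu == 3 && emergency_sp != NULL);
            free(ti);
            return 0;
    }
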
@@ -661,7 +686,7 @@ void __init emergency_stack_init(void)
 
 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
 {
-	return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
+	return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align,
 				    __pa(MAX_DMA_ADDRESS));
 }
 
@@ -672,7 +697,7 @@ static void __init pcpu_fc_free(void *ptr, size_t size)
 
 static int pcpu_cpu_distance(unsigned int from, unsigned int to)
 {
-	if (cpu_to_node(from) == cpu_to_node(to))
+	if (early_cpu_to_node(from) == early_cpu_to_node(to))
 		return LOCAL_DISTANCE;
 	else
 		return REMOTE_DISTANCE;
@@ -45,10 +45,14 @@ _GLOBAL(ftrace_caller)
 	stdu	r1,-SWITCH_FRAME_SIZE(r1)
 
 	/* Save all gprs to pt_regs */
-	SAVE_8GPRS(0,r1)
-	SAVE_8GPRS(8,r1)
-	SAVE_8GPRS(16,r1)
-	SAVE_8GPRS(24,r1)
+	SAVE_GPR(0, r1)
+	SAVE_10GPRS(2, r1)
+	SAVE_10GPRS(12, r1)
+	SAVE_10GPRS(22, r1)
+
+	/* Save previous stack pointer (r1) */
+	addi	r8, r1, SWITCH_FRAME_SIZE
+	std	r8, GPR1(r1)
 
 	/* Load special regs for save below */
 	mfmsr	r8
@@ -95,18 +99,44 @@ ftrace_call:
 	bl	ftrace_stub
 	nop
 
-	/* Load ctr with the possibly modified NIP */
-	ld	r3, _NIP(r1)
-	mtctr	r3
+	/* Load the possibly modified NIP */
+	ld	r15, _NIP(r1)
+
 #ifdef CONFIG_LIVEPATCH
-	cmpd	r14,r3		/* has NIP been altered? */
+	cmpd	r14, r15	/* has NIP been altered? */
 #endif
 
+#if defined(CONFIG_LIVEPATCH) && defined(CONFIG_KPROBES_ON_FTRACE)
+	/* NIP has not been altered, skip over further checks */
+	beq	1f
+
+	/* Check if there is an active kprobe on us */
+	subi	r3, r14, 4
+	bl	is_current_kprobe_addr
+	nop
+
+	/*
+	 * If r3 == 1, then this is a kprobe/jprobe.
+	 * else, this is livepatched function.
+	 *
+	 * The conditional branch for livepatch_handler below will use the
+	 * result of this comparison. For kprobe/jprobe, we just need to branch to
+	 * the new NIP, not call livepatch_handler. The branch below is bne, so we
+	 * want CR0[EQ] to be true if this is a kprobe/jprobe. Which means we want
+	 * CR0[EQ] = (r3 == 1).
+	 */
+	cmpdi	r3, 1
+1:
+#endif
+
+	/* Load CTR with the possibly modified NIP */
+	mtctr	r15
+
 	/* Restore gprs */
-	REST_8GPRS(0,r1)
-	REST_8GPRS(8,r1)
-	REST_8GPRS(16,r1)
-	REST_8GPRS(24,r1)
+	REST_GPR(0,r1)
+	REST_10GPRS(2,r1)
+	REST_10GPRS(12,r1)
+	REST_10GPRS(22,r1)
 
 	/* Restore possibly modified LR */
 	ld	r0, _LINK(r1)
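In C terms, the new CR0 dance amounts to roughly the following decision (r14 = original NIP, r15 = possibly-updated NIP). This is an illustrative model of the control flow, not real kernel code:

    #include <assert.h>

    static int no_probe(unsigned long addr) { (void)addr; return 0; }
    static int probe_here(unsigned long addr) { (void)addr; return 1; }

    /* Model of the ftrace_caller decision in the hunk above. */
    static int should_call_livepatch_handler(unsigned long orig_nip,
                                             unsigned long new_nip,
                                             int (*is_kprobe)(unsigned long))
    {
            if (new_nip == orig_nip)
                    return 0;       /* NIP untouched: nothing to do */
            if (is_kprobe(orig_nip - 4))
                    return 0;       /* kprobe/jprobe: just branch to the new NIP */
            return 1;               /* livepatched function: take livepatch_handler */
    }

    int main(void)
    {
            assert(should_call_livepatch_handler(0x1004, 0x1004, no_probe) == 0);
            assert(should_call_livepatch_handler(0x1004, 0x2000, probe_here) == 0);
            assert(should_call_livepatch_handler(0x1004, 0x2000, no_probe) == 1);
            return 0;
    }
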
@@ -119,7 +149,10 @@ ftrace_call:
 	addi r1, r1, SWITCH_FRAME_SIZE
 
 #ifdef CONFIG_LIVEPATCH
-	/* Based on the cmpd above, if the NIP was altered handle livepatch */
+	/*
+	 * Based on the cmpd or cmpdi above, if the NIP was altered and we're
+	 * not on a kprobe/jprobe, then handle livepatch.
+	 */
 	bne-	livepatch_handler
 #endif
 
@@ -69,7 +69,7 @@ static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
 {
 	/* If the XIVE supports the new "store EOI facility, use it */
 	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
-		__x_writeq(0, __x_eoi_page(xd));
+		__x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
 	else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
 		opal_int_eoi(hw_irq);
 	} else {
@@ -89,7 +89,7 @@ static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
 		 * properly.
 		 */
 		if (xd->flags & XIVE_IRQ_FLAG_LSI)
-			__x_readq(__x_eoi_page(xd));
+			__x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
 		else {
 			eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);
@@ -99,7 +99,7 @@ static int hash__init_new_context(struct mm_struct *mm)
 	 * mm->context.addr_limit. Default to max task size so that we copy the
 	 * default values to paca which will help us to handle slb miss early.
 	 */
-	mm->context.addr_limit = TASK_SIZE_128TB;
+	mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
 
 	/*
 	 * The old code would re-promote on fork, we don't do that when using
@@ -101,5 +101,6 @@ void perf_get_regs_user(struct perf_regs *regs_user,
 			struct pt_regs *regs_user_copy)
 {
 	regs_user->regs = task_pt_regs(current);
-	regs_user->abi = perf_reg_abi(current);
+	regs_user->abi = (regs_user->regs) ? perf_reg_abi(current) :
+			 PERF_SAMPLE_REGS_ABI_NONE;
 }
@@ -402,7 +402,7 @@ static struct power_pmu power9_isa207_pmu = {
 	.name			= "POWER9",
 	.n_counter		= MAX_PMU_COUNTERS,
 	.add_fields		= ISA207_ADD_FIELDS,
-	.test_adder		= ISA207_TEST_ADDER,
+	.test_adder		= P9_DD1_TEST_ADDER,
 	.compute_mmcr		= isa207_compute_mmcr,
 	.config_bhrb		= power9_config_bhrb,
 	.bhrb_filter_map	= power9_bhrb_filter_map,
@@ -421,7 +421,7 @@ static struct power_pmu power9_pmu = {
 	.name			= "POWER9",
 	.n_counter		= MAX_PMU_COUNTERS,
 	.add_fields		= ISA207_ADD_FIELDS,
-	.test_adder		= P9_DD1_TEST_ADDER,
+	.test_adder		= ISA207_TEST_ADDER,
 	.compute_mmcr		= isa207_compute_mmcr,
 	.config_bhrb		= power9_config_bhrb,
 	.bhrb_filter_map	= power9_bhrb_filter_map,
@@ -59,6 +59,17 @@ config PPC_OF_BOOT_TRAMPOLINE
 
 	  In case of doubt, say Y
 
+config PPC_DT_CPU_FTRS
+	bool "Device-tree based CPU feature discovery & setup"
+	depends on PPC_BOOK3S_64
+	default y
+	help
+	  This enables code to use a new device tree binding for describing CPU
+	  compatibility and features. Saying Y here will attempt to use the new
+	  binding if the firmware provides it. Currently only the skiboot
+	  firmware provides this binding.
+	  If you're not sure say Y.
+
 config UDBG_RTAS_CONSOLE
 	bool "RTAS based debug console"
 	depends on PPC_RTAS
@@ -175,6 +175,8 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i,
 	skip = roundup(cprm->pos - total + sz, 4) - cprm->pos;
 	if (!dump_skip(cprm, skip))
 		goto Eio;
+
+	rc = 0;
 out:
 	free_page((unsigned long)buf);
 	return rc;
@@ -75,7 +75,8 @@ struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
 	if (WARN_ON(!gpdev))
 		return NULL;
 
-	if (WARN_ON(!gpdev->dev.of_node))
+	/* Not all PCI devices have device-tree nodes */
+	if (!gpdev->dev.of_node)
 		return NULL;
 
 	/* Get assoicated PCI device */
@@ -448,7 +449,7 @@ static int mmio_launch_invalidate(struct npu *npu, unsigned long launch,
 	return mmio_atsd_reg;
 }
 
-static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
+static int mmio_invalidate_pid(struct npu *npu, unsigned long pid, bool flush)
 {
 	unsigned long launch;
 
@@ -464,12 +465,15 @@ static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
 	/* PID */
 	launch |= pid << PPC_BITLSHIFT(38);
 
+	/* No flush */
+	launch |= !flush << PPC_BITLSHIFT(39);
+
 	/* Invalidating the entire process doesn't use a va */
 	return mmio_launch_invalidate(npu, launch, 0);
 }
 
 static int mmio_invalidate_va(struct npu *npu, unsigned long va,
-			unsigned long pid)
+			unsigned long pid, bool flush)
 {
 	unsigned long launch;
 
@@ -485,26 +489,60 @@ static void mmio_invalidate_va(struct npu *npu, unsigned long va,
 	/* PID */
 	launch |= pid << PPC_BITLSHIFT(38);
 
+	/* No flush */
+	launch |= !flush << PPC_BITLSHIFT(39);
+
 	return mmio_launch_invalidate(npu, launch, va);
 }
 
 #define mn_to_npu_context(x) container_of(x, struct npu_context, mn)
 
+struct mmio_atsd_reg {
+	struct npu *npu;
+	int reg;
+};
+
+static void mmio_invalidate_wait(
+	struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], bool flush)
+{
+	struct npu *npu;
+	int i, reg;
+
+	/* Wait for all invalidations to complete */
+	for (i = 0; i <= max_npu2_index; i++) {
+		if (mmio_atsd_reg[i].reg < 0)
+			continue;
+
+		/* Wait for completion */
+		npu = mmio_atsd_reg[i].npu;
+		reg = mmio_atsd_reg[i].reg;
+		while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
+			cpu_relax();
+
+		put_mmio_atsd_reg(npu, reg);
+
+		/*
+		 * The GPU requires two flush ATSDs to ensure all entries have
+		 * been flushed. We use PID 0 as it will never be used for a
+		 * process on the GPU.
+		 */
+		if (flush)
+			mmio_invalidate_pid(npu, 0, true);
+	}
+}
+
 /*
  * Invalidate either a single address or an entire PID depending on
  * the value of va.
  */
 static void mmio_invalidate(struct npu_context *npu_context, int va,
-			unsigned long address)
+			unsigned long address, bool flush)
 {
-	int i, j, reg;
+	int i, j;
 	struct npu *npu;
 	struct pnv_phb *nphb;
 	struct pci_dev *npdev;
-	struct {
-		struct npu *npu;
-		int reg;
-	} mmio_atsd_reg[NV_MAX_NPUS];
+	struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS];
 	unsigned long pid = npu_context->mm->context.id;
 
 	/*
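The intent of the flush parameter, modelled in plain C: non-final invalidations are launched with the "no flush" bit set, and the final wait follows up with the extra flush ATSD the GPU requires, which is itself waited on. All hardware interaction is mocked here; this is a sketch of the protocol, not the driver:

    #include <stdio.h>
    #include <stdbool.h>

    /* Mocked ATSD launch: the real code writes an MMIO launch register. */
    static void launch_atsd(unsigned long pid, bool flush)
    {
            printf("launch ATSD: pid=%lu flush=%d\n", pid, flush);
    }

    static void mmio_invalidate_wait(bool flush)
    {
            /* real code: poll XTS_ATSD_STAT until the launched ATSD completes */
            printf("wait for completion\n");
            if (flush)
                    launch_atsd(0, true); /* flush ATSD; PID 0 is never a GPU process */
    }

    int main(void)
    {
            launch_atsd(42, true);        /* final invalidation for PID 42 */
            mmio_invalidate_wait(true);   /* wait, then send the flush ATSD */
            mmio_invalidate_wait(false);  /* wait for the flush itself */
            return 0;
    }
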
@@ -524,10 +562,11 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
 
 		if (va)
 			mmio_atsd_reg[i].reg =
-				mmio_invalidate_va(npu, address, pid);
+				mmio_invalidate_va(npu, address, pid,
+						flush);
 		else
 			mmio_atsd_reg[i].reg =
-				mmio_invalidate_pid(npu, pid);
+				mmio_invalidate_pid(npu, pid, flush);
 
 		/*
 		 * The NPU hardware forwards the shootdown to all GPUs
@@ -543,18 +582,10 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
 	 */
 	flush_tlb_mm(npu_context->mm);
 
-	/* Wait for all invalidations to complete */
-	for (i = 0; i <= max_npu2_index; i++) {
-		if (mmio_atsd_reg[i].reg < 0)
-			continue;
-
-		/* Wait for completion */
-		npu = mmio_atsd_reg[i].npu;
-		reg = mmio_atsd_reg[i].reg;
-		while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
-			cpu_relax();
-		put_mmio_atsd_reg(npu, reg);
-	}
+	mmio_invalidate_wait(mmio_atsd_reg, flush);
+	if (flush)
+		/* Wait for the flush to complete */
+		mmio_invalidate_wait(mmio_atsd_reg, false);
 }
 
 static void pnv_npu2_mn_release(struct mmu_notifier *mn,
@@ -570,7 +601,7 @@ static void pnv_npu2_mn_release(struct mmu_notifier *mn,
 	 * There should be no more translation requests for this PID, but we
 	 * need to ensure any entries for it are removed from the TLB.
 	 */
-	mmio_invalidate(npu_context, 0, 0);
+	mmio_invalidate(npu_context, 0, 0, true);
 }
 
 static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
@@ -580,7 +611,7 @@ static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
 {
 	struct npu_context *npu_context = mn_to_npu_context(mn);
 
-	mmio_invalidate(npu_context, 1, address);
+	mmio_invalidate(npu_context, 1, address, true);
 }
 
 static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
@@ -589,7 +620,7 @@ static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
 {
 	struct npu_context *npu_context = mn_to_npu_context(mn);
 
-	mmio_invalidate(npu_context, 1, address);
+	mmio_invalidate(npu_context, 1, address, true);
 }
 
 static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
@@ -599,8 +630,11 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
 	struct npu_context *npu_context = mn_to_npu_context(mn);
 	unsigned long address;
 
-	for (address = start; address <= end; address += PAGE_SIZE)
-		mmio_invalidate(npu_context, 1, address);
+	for (address = start; address < end; address += PAGE_SIZE)
+		mmio_invalidate(npu_context, 1, address, false);
+
+	/* Do the flush only on the final addess == end */
+	mmio_invalidate(npu_context, 1, address, true);
 }
 
 static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
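The boundary change in isolation: the old loop used address <= end and so invalidated one page past the range; the new code covers [start, end) and performs the flush exactly once, at address == end. A standalone model:

    #include <assert.h>

    #define PAGE_SIZE 4096UL

    static unsigned long pages_invalidated;

    static void invalidate(unsigned long address, int flush)
    {
            (void)address;
            if (!flush)
                    pages_invalidated++;
    }

    int main(void)
    {
            unsigned long start = 0, end = 4 * PAGE_SIZE, address;

            for (address = start; address < end; address += PAGE_SIZE)
                    invalidate(address, 0);
            invalidate(address, 1); /* flush only on the final address == end */

            assert(pages_invalidated == 4); /* old "<=" loop would have done 5 */
            return 0;
    }
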
@@ -650,8 +684,11 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
 		/* No nvlink associated with this GPU device */
 		return ERR_PTR(-ENODEV);
 
-	if (!mm) {
-		/* kernel thread contexts are not supported */
+	if (!mm || mm->context.id == 0) {
+		/*
+		 * Kernel thread contexts are not supported and context id 0 is
+		 * reserved on the GPU.
+		 */
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -408,7 +408,13 @@ static DEVICE_ATTR(subcores_per_core, 0644,
 
 static int subcore_init(void)
 {
-	if (!cpu_has_feature(CPU_FTR_SUBCORE))
+	unsigned pvr_ver;
+
+	pvr_ver = PVR_VER(mfspr(SPRN_PVR));
+
+	if (pvr_ver != PVR_POWER8 &&
+	    pvr_ver != PVR_POWER8E &&
+	    pvr_ver != PVR_POWER8NVL)
 		return 0;
 
 	/*
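A sketch of the PVR-based gate that replaces the removed CPU feature bit. mfspr(SPRN_PVR) is mocked with a constant, and PVR_VER()/the PVR_POWER8* values follow their usual definitions in asm/reg.h (treat them as assumptions here):

    #include <assert.h>
    #include <stdint.h>

    #define PVR_VER(pvr)   (((pvr) >> 16) & 0xFFFF) /* version field of the PVR */
    #define PVR_POWER8E    0x004B
    #define PVR_POWER8NVL  0x004C
    #define PVR_POWER8     0x004D

    static uint32_t mock_mfspr_pvr = 0x004D0200; /* pretend we are a POWER8 */

    int main(void)
    {
            unsigned int pvr_ver = PVR_VER(mock_mfspr_pvr);

            /* subcore_init() proceeds only on one of the POWER8 variants */
            assert(pvr_ver == PVR_POWER8 || pvr_ver == PVR_POWER8E ||
                   pvr_ver == PVR_POWER8NVL);
            return 0;
    }
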
@@ -125,6 +125,7 @@ static struct property *dlpar_clone_drconf_property(struct device_node *dn)
 	for (i = 0; i < num_lmbs; i++) {
 		lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
 		lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
+		lmbs[i].aa_index = be32_to_cpu(lmbs[i].aa_index);
 		lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
 	}
 
@@ -148,6 +149,7 @@ static void dlpar_update_drconf_property(struct device_node *dn,
 	for (i = 0; i < num_lmbs; i++) {
 		lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
 		lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
+		lmbs[i].aa_index = cpu_to_be32(lmbs[i].aa_index);
 		lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
 	}
 
@@ -75,7 +75,8 @@ static int u8_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
 
 static void u8_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
 {
-	struct u8_gpio_chip *u8_gc = gpiochip_get_data(&mm_gc->gc);
+	struct u8_gpio_chip *u8_gc =
+		container_of(mm_gc, struct u8_gpio_chip, mm_gc);
 
 	u8_gc->data = in_8(mm_gc->regs);
 }
@@ -297,7 +297,7 @@ void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
 {
 	/* If the XIVE supports the new "store EOI facility, use it */
 	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
-		out_be64(xd->eoi_mmio, 0);
+		out_be64(xd->eoi_mmio + XIVE_ESB_STORE_EOI, 0);
 	else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
 		/*
 		 * The FW told us to call it. This happens for some
@@ -45,7 +45,7 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
 	mutex_init(&ctx->mapping_lock);
 	ctx->mapping = NULL;
 
-	if (cxl_is_psl8(afu)) {
+	if (cxl_is_power8()) {
 		spin_lock_init(&ctx->sste_lock);
 
 		/*
@@ -189,7 +189,7 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
 	if (start + len > ctx->afu->adapter->ps_size)
 		return -EINVAL;
 
-	if (cxl_is_psl9(ctx->afu)) {
+	if (cxl_is_power9()) {
 		/*
 		 * Make sure there is a valid problem state
 		 * area space for this AFU.
@@ -324,7 +324,7 @@ static void reclaim_ctx(struct rcu_head *rcu)
 {
 	struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu);
 
-	if (cxl_is_psl8(ctx->afu))
+	if (cxl_is_power8())
 		free_page((u64)ctx->sstp);
 	if (ctx->ff_page)
 		__free_page(ctx->ff_page);
@@ -357,6 +357,7 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
 #define CXL_PSL9_DSISR_An_PF_RGP  0x0000000000000090ULL /* PTE not found (Radix Guest (parent)) 0b10010000 */
 #define CXL_PSL9_DSISR_An_PF_HRH  0x0000000000000094ULL /* PTE not found (HPT/Radix Host)       0b10010100 */
 #define CXL_PSL9_DSISR_An_PF_STEG 0x000000000000009CULL /* PTE not found (STEG VA)              0b10011100 */
+#define CXL_PSL9_DSISR_An_URTCH   0x00000000000000B4ULL /* Unsupported Radix Tree Configuration 0b10110100 */
 
 /****** CXL_PSL_TFC_An ******************************************************/
 #define CXL_PSL_TFC_An_A  (1ull << (63-28)) /* Acknowledge non-translation fault */
@@ -844,24 +845,15 @@ static inline bool cxl_is_power8(void)
 
 static inline bool cxl_is_power9(void)
 {
-	/* intermediate solution */
-	if (!cxl_is_power8() &&
-	    (cpu_has_feature(CPU_FTRS_POWER9) ||
-	     cpu_has_feature(CPU_FTR_POWER9_DD1)))
+	if (pvr_version_is(PVR_POWER9))
 		return true;
 	return false;
 }
 
-static inline bool cxl_is_psl8(struct cxl_afu *afu)
+static inline bool cxl_is_power9_dd1(void)
 {
-	if (afu->adapter->caia_major == 1)
+	if ((pvr_version_is(PVR_POWER9)) &&
+	    cpu_has_feature(CPU_FTR_POWER9_DD1))
 		return true;
 	return false;
 }
 
-static inline bool cxl_is_psl9(struct cxl_afu *afu)
-{
-	if (afu->adapter->caia_major == 2)
-		return true;
-	return false;
-}
@@ -187,7 +187,7 @@ static struct mm_struct *get_mem_context(struct cxl_context *ctx)
 
 static bool cxl_is_segment_miss(struct cxl_context *ctx, u64 dsisr)
 {
-	if ((cxl_is_psl8(ctx->afu)) && (dsisr & CXL_PSL_DSISR_An_DS))
+	if ((cxl_is_power8() && (dsisr & CXL_PSL_DSISR_An_DS)))
 		return true;
 
 	return false;
@@ -195,15 +195,22 @@ static bool cxl_is_segment_miss(struct cxl_context *ctx, u64 dsisr)
 
 static bool cxl_is_page_fault(struct cxl_context *ctx, u64 dsisr)
 {
-	if ((cxl_is_psl8(ctx->afu)) && (dsisr & CXL_PSL_DSISR_An_DM))
+	u64 crs; /* Translation Checkout Response Status */
+
+	if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_An_DM))
 		return true;
 
-	if ((cxl_is_psl9(ctx->afu)) &&
-	    ((dsisr & CXL_PSL9_DSISR_An_CO_MASK) &
-	     (CXL_PSL9_DSISR_An_PF_SLR | CXL_PSL9_DSISR_An_PF_RGC |
-	      CXL_PSL9_DSISR_An_PF_RGP | CXL_PSL9_DSISR_An_PF_HRH |
-	      CXL_PSL9_DSISR_An_PF_STEG)))
+	if (cxl_is_power9()) {
+		crs = (dsisr & CXL_PSL9_DSISR_An_CO_MASK);
+		if ((crs == CXL_PSL9_DSISR_An_PF_SLR) ||
+		    (crs == CXL_PSL9_DSISR_An_PF_RGC) ||
+		    (crs == CXL_PSL9_DSISR_An_PF_RGP) ||
+		    (crs == CXL_PSL9_DSISR_An_PF_HRH) ||
+		    (crs == CXL_PSL9_DSISR_An_PF_STEG) ||
+		    (crs == CXL_PSL9_DSISR_An_URTCH)) {
+			return true;
+		}
+	}
 
 	return false;
 }
@@ -159,11 +159,8 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
 
 	/* Do this outside the status_mutex to avoid a circular dependency with
 	 * the locking in cxl_mmap_fault() */
-	if (copy_from_user(&work, uwork,
-			   sizeof(struct cxl_ioctl_start_work))) {
-		rc = -EFAULT;
-		goto out;
-	}
+	if (copy_from_user(&work, uwork, sizeof(work)))
+		return -EFAULT;
 
 	mutex_lock(&ctx->status_mutex);
 	if (ctx->status != OPENED) {
@@ -329,8 +329,15 @@ static int __init init_cxl(void)
 
 	cxl_debugfs_init();
 
-	if ((rc = register_cxl_calls(&cxl_calls)))
-		goto err;
+	/*
+	 * we don't register the callback on P9. slb callack is only
+	 * used for the PSL8 MMU and CX4.
+	 */
+	if (cxl_is_power8()) {
+		rc = register_cxl_calls(&cxl_calls);
+		if (rc)
+			goto err;
+	}
 
 	if (cpu_has_feature(CPU_FTR_HVMODE)) {
 		cxl_ops = &cxl_native_ops;
@@ -347,6 +354,7 @@ static int __init init_cxl(void)
 
 	return 0;
 err1:
-	unregister_cxl_calls(&cxl_calls);
+	if (cxl_is_power8())
+		unregister_cxl_calls(&cxl_calls);
 err:
 	cxl_debugfs_exit();
@@ -366,6 +374,7 @@ static void exit_cxl(void)
 
 	cxl_debugfs_exit();
 	cxl_file_exit();
-	unregister_cxl_calls(&cxl_calls);
+	if (cxl_is_power8())
+		unregister_cxl_calls(&cxl_calls);
 	idr_destroy(&cxl_adapter_idr);
 }
@@ -105,11 +105,16 @@ static int native_afu_reset(struct cxl_afu *afu)
 			   CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
 			   false);
 
-	/* Re-enable any masked interrupts */
-	serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
-	serr &= ~CXL_PSL_SERR_An_IRQ_MASKS;
-	cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
-
+	/*
+	 * Re-enable any masked interrupts when the AFU is not
+	 * activated to avoid side effects after attaching a process
+	 * in dedicated mode.
+	 */
+	if (afu->current_mode == 0) {
+		serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
+		serr &= ~CXL_PSL_SERR_An_IRQ_MASKS;
+		cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
+	}
 
 	return rc;
 }
@@ -139,9 +144,9 @@ int cxl_psl_purge(struct cxl_afu *afu)
 
 	pr_devel("PSL purge request\n");
 
-	if (cxl_is_psl8(afu))
+	if (cxl_is_power8())
 		trans_fault = CXL_PSL_DSISR_TRANS;
-	if (cxl_is_psl9(afu))
+	if (cxl_is_power9())
 		trans_fault = CXL_PSL9_DSISR_An_TF;
 
 	if (!cxl_ops->link_ok(afu->adapter, afu)) {
@@ -603,7 +608,7 @@ static u64 calculate_sr(struct cxl_context *ctx)
 		if (!test_tsk_thread_flag(current, TIF_32BIT))
 			sr |= CXL_PSL_SR_An_SF;
 	}
-	if (cxl_is_psl9(ctx->afu)) {
+	if (cxl_is_power9()) {
 		if (radix_enabled())
 			sr |= CXL_PSL_SR_An_XLAT_ror;
 		else
@@ -1117,10 +1122,10 @@ static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
 
 static bool cxl_is_translation_fault(struct cxl_afu *afu, u64 dsisr)
 {
-	if ((cxl_is_psl8(afu)) && (dsisr & CXL_PSL_DSISR_TRANS))
+	if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_TRANS))
 		return true;
 
-	if ((cxl_is_psl9(afu)) && (dsisr & CXL_PSL9_DSISR_An_TF))
+	if ((cxl_is_power9()) && (dsisr & CXL_PSL9_DSISR_An_TF))
 		return true;
 
 	return false;
@@ -1194,10 +1199,10 @@ static void native_irq_wait(struct cxl_context *ctx)
 		if (ph != ctx->pe)
 			return;
 		dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
-		if (cxl_is_psl8(ctx->afu) &&
+		if (cxl_is_power8() &&
 		   ((dsisr & CXL_PSL_DSISR_PENDING) == 0))
 			return;
-		if (cxl_is_psl9(ctx->afu) &&
+		if (cxl_is_power9() &&
 		   ((dsisr & CXL_PSL9_DSISR_PENDING) == 0))
 			return;
 		/*
@@ -1302,13 +1307,16 @@ int cxl_native_register_psl_err_irq(struct cxl *adapter)
 
 void cxl_native_release_psl_err_irq(struct cxl *adapter)
 {
-	if (adapter->native->err_virq != irq_find_mapping(NULL, adapter->native->err_hwirq))
+	if (adapter->native->err_virq == 0 ||
+	    adapter->native->err_virq !=
+	    irq_find_mapping(NULL, adapter->native->err_hwirq))
 		return;
 
 	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
 	cxl_unmap_irq(adapter->native->err_virq, adapter);
 	cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
 	kfree(adapter->irq_name);
+	adapter->native->err_virq = 0;
 }
 
 int cxl_native_register_serr_irq(struct cxl_afu *afu)
@@ -1346,13 +1354,15 @@ int cxl_native_register_serr_irq(struct cxl_afu *afu)
 
 void cxl_native_release_serr_irq(struct cxl_afu *afu)
 {
-	if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
+	if (afu->serr_virq == 0 ||
+	    afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
 		return;
 
 	cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
 	cxl_unmap_irq(afu->serr_virq, afu);
 	cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
 	kfree(afu->err_irq_name);
+	afu->serr_virq = 0;
 }
 
 int cxl_native_register_psl_irq(struct cxl_afu *afu)
@@ -1375,12 +1385,15 @@ int cxl_native_register_psl_irq(struct cxl_afu *afu)
 
 void cxl_native_release_psl_irq(struct cxl_afu *afu)
 {
-	if (afu->native->psl_virq != irq_find_mapping(NULL, afu->native->psl_hwirq))
+	if (afu->native->psl_virq == 0 ||
+	    afu->native->psl_virq !=
+	    irq_find_mapping(NULL, afu->native->psl_hwirq))
 		return;
 
 	cxl_unmap_irq(afu->native->psl_virq, afu);
 	cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
 	kfree(afu->psl_irq_name);
+	afu->native->psl_virq = 0;
 }
 
 static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
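The guard added to all three release paths, distilled: record the virq as 0 after freeing it, and treat 0 as "nothing to release", so a second call is harmless. A mock with no real IRQ plumbing:

    #include <assert.h>

    static unsigned int serr_virq = 57;
    static int frees;

    static void release_serr_irq(void)
    {
            if (serr_virq == 0)
                    return;  /* already released: the call is now idempotent */
            frees++;         /* real code: cxl_unmap_irq()/release_one_irq() */
            serr_virq = 0;
    }

    int main(void)
    {
            release_serr_irq();
            release_serr_irq(); /* second call is a no-op */
            assert(frees == 1);
            return 0;
    }
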
@@ -436,7 +436,7 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter, struct pci
 	/* nMMU_ID Defaults to: b’000001001’*/
 	xsl_dsnctl |= ((u64)0x09 << (63-28));
 
-	if (cxl_is_power9() && !cpu_has_feature(CPU_FTR_POWER9_DD1)) {
+	if (!(cxl_is_power9_dd1())) {
 		/*
 		 * Used to identify CAPI packets which should be sorted into
 		 * the Non-Blocking queues by the PHB. This field should match
@@ -491,7 +491,7 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter, struct pci
 	cxl_p1_write(adapter, CXL_PSL9_APCDEDTYPE, 0x40000003FFFF0000ULL);
 
 	/* Disable vc dd1 fix */
-	if ((cxl_is_power9() && cpu_has_feature(CPU_FTR_POWER9_DD1)))
+	if (cxl_is_power9_dd1())
 		cxl_p1_write(adapter, CXL_PSL9_GP_CT, 0x0400000000000001ULL);
 
 	return 0;
@@ -1439,8 +1439,7 @@ int cxl_pci_reset(struct cxl *adapter)
 	 * The adapter is about to be reset, so ignore errors.
 	 * Not supported on P9 DD1
 	 */
-	if ((cxl_is_power8()) ||
-	    ((cxl_is_power9() && !cpu_has_feature(CPU_FTR_POWER9_DD1))))
+	if ((cxl_is_power8()) || (!(cxl_is_power9_dd1())))
 		cxl_data_cache_flush(adapter);
 
 	/* pcie_warm_reset requests a fundamental pci reset which includes a
@@ -1750,7 +1749,6 @@ static const struct cxl_service_layer_ops psl9_ops = {
 	.debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_psl9,
 	.debugfs_add_afu_regs = cxl_debugfs_add_afu_regs_psl9,
 	.psl_irq_dump_registers = cxl_native_irq_dump_regs_psl9,
 	.err_irq_dump_registers = cxl_native_err_irq_dump_regs,
-	.debugfs_stop_trace = cxl_stop_trace_psl9,
 	.write_timebase_ctrl = write_timebase_ctrl_psl9,
 	.timebase_read = timebase_read_psl9,
@@ -1889,8 +1887,7 @@ static void cxl_pci_remove_adapter(struct cxl *adapter)
 	 * Flush adapter datacache as its about to be removed.
 	 * Not supported on P9 DD1.
 	 */
-	if ((cxl_is_power8()) ||
-	    ((cxl_is_power9() && !cpu_has_feature(CPU_FTR_POWER9_DD1))))
+	if ((cxl_is_power8()) || (!(cxl_is_power9_dd1())))
		cxl_data_cache_flush(adapter);
 
 	cxl_deconfigure_adapter(adapter);