Merge branch 'fixes' into next
Merge our fixes branch to bring in some changes that conflict with upcoming next content.
commit fc8a898cfd
@@ -163,7 +163,6 @@ config PPC
     select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
     select ARCH_WANT_LD_ORPHAN_WARN
     select ARCH_WANTS_MODULES_DATA_IN_VMALLOC if PPC_BOOK3S_32 || PPC_8xx
     select ARCH_WANTS_NO_INSTR
     select ARCH_WEAK_RELEASE_ACQUIRE
     select BINFMT_ELF
     select BUILDTIME_TABLE_SORT
@@ -210,6 +210,10 @@ ld_version()
     gsub(".*version ", "");
     gsub("-.*", "");
     split($1,a, ".");
+    if( length(a[3]) == "8" )
+        # a[3] is probably a date of format yyyymmdd used for release snapshots. We
+        # can assume it to be zero as it does not signify a new version as such.
+        a[3] = 0;
     print a[1]*100000000 + a[2]*1000000 + a[3]*10000;
     exit
 }'
@@ -97,6 +97,8 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 {
     if (radix_enabled())
         radix__tlb_flush(tlb);
+
+    return hash__tlb_flush(tlb);
 }
 
 #ifdef CONFIG_SMP
@@ -175,6 +175,15 @@ static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
     return flags;
 }
 
+static inline notrace unsigned long irq_soft_mask_andc_return(unsigned long mask)
+{
+    unsigned long flags = irq_soft_mask_return();
+
+    irq_soft_mask_set(flags & ~mask);
+
+    return flags;
+}
+
 static inline unsigned long arch_local_save_flags(void)
 {
     return irq_soft_mask_return();
@@ -194,7 +203,7 @@ static inline void arch_local_irq_enable(void)
 
 static inline unsigned long arch_local_irq_save(void)
 {
-    return irq_soft_mask_set_return(IRQS_DISABLED);
+    return irq_soft_mask_or_return(IRQS_DISABLED);
 }
 
 static inline bool arch_irqs_disabled_flags(unsigned long flags)
@@ -333,10 +342,11 @@ bool power_pmu_wants_prompt_pmi(void);
  * is a different soft-masked interrupt pending that requires hard
  * masking.
  */
-static inline bool should_hard_irq_enable(void)
+static inline bool should_hard_irq_enable(struct pt_regs *regs)
 {
     if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
-        WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
+        WARN_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
+        WARN_ON(!(get_paca()->irq_happened & PACA_IRQ_HARD_DIS));
         WARN_ON(mfmsr() & MSR_EE);
     }
 
@@ -349,8 +359,17 @@ static inline bool should_hard_irq_enable(void)
      *
      * TODO: Add test for 64e
      */
-    if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !power_pmu_wants_prompt_pmi())
-        return false;
+    if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
+        if (!power_pmu_wants_prompt_pmi())
+            return false;
+        /*
+         * If PMIs are disabled then IRQs should be disabled as well,
+         * so we shouldn't see this condition, check for it just in
+         * case because we are about to enable PMIs.
+         */
+        if (WARN_ON_ONCE(regs->softe & IRQS_PMI_DISABLED))
+            return false;
+    }
 
     if (get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)
         return false;
@@ -360,18 +379,16 @@ static inline bool should_hard_irq_enable(void)
 
 /*
  * Do the hard enabling, only call this if should_hard_irq_enable is true.
+ * This allows PMI interrupts to profile irq handlers.
  */
 static inline void do_hard_irq_enable(void)
 {
-    if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
-        WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
-        WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK);
-        WARN_ON(mfmsr() & MSR_EE);
-    }
     /*
-     * This allows PMI interrupts (and watchdog soft-NMIs) through.
-     * There is no other reason to enable this way.
+     * Asynch interrupts come in with IRQS_ALL_DISABLED,
+     * PACA_IRQ_HARD_DIS, and MSR[EE]=0.
      */
+    if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+        irq_soft_mask_andc_return(IRQS_PMI_DISABLED);
     get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
     __hard_irq_enable();
 }
@@ -454,7 +471,7 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
     return !(regs->msr & MSR_EE);
 }
 
-static __always_inline bool should_hard_irq_enable(void)
+static __always_inline bool should_hard_irq_enable(struct pt_regs *regs)
 {
     return false;
 }
@@ -137,7 +137,7 @@ struct imc_pmu {
  * are inited.
  */
 struct imc_pmu_ref {
-    struct mutex lock;
+    spinlock_t lock;
     unsigned int id;
     int refc;
 };
@@ -27,7 +27,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(doorbell_exception)
 
     ppc_msgsync();
 
-    if (should_hard_irq_enable())
+    if (should_hard_irq_enable(regs))
         do_hard_irq_enable();
 
     kvmppc_clear_host_ipi(smp_processor_id());
@@ -864,7 +864,7 @@ _GLOBAL(load_up_spe)
  * SPE unavailable trap from kernel - print a message, but let
  * the task use SPE in the kernel until it returns to user mode.
  */
-KernelSPE:
+SYM_FUNC_START_LOCAL(KernelSPE)
     lwz r3,_MSR(r1)
     oris r3,r3,MSR_SPE@h
     stw r3,_MSR(r1) /* enable use of SPE after return */

@@ -881,6 +881,7 @@ KernelSPE:
 #endif
     .align 4,0
 
+SYM_FUNC_END(KernelSPE)
 #endif /* CONFIG_SPE */
 
 /*
@@ -50,16 +50,18 @@ static inline bool exit_must_hard_disable(void)
  */
 static notrace __always_inline bool prep_irq_for_enabled_exit(bool restartable)
 {
+    bool must_hard_disable = (exit_must_hard_disable() || !restartable);
+
     /* This must be done with RI=1 because tracing may touch vmaps */
     trace_hardirqs_on();
 
-    if (exit_must_hard_disable() || !restartable)
+    if (must_hard_disable)
         __hard_EE_RI_disable();
 
 #ifdef CONFIG_PPC64
     /* This pattern matches prep_irq_for_idle */
     if (unlikely(lazy_irq_pending_nocheck())) {
-        if (exit_must_hard_disable() || !restartable) {
+        if (must_hard_disable) {
             local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
             __hard_RI_enable();
         }
@@ -238,7 +238,7 @@ static void __do_irq(struct pt_regs *regs, unsigned long oldsp)
     irq = static_call(ppc_get_irq)();
 
     /* We can hard enable interrupts now to allow perf interrupts */
-    if (should_hard_irq_enable())
+    if (should_hard_irq_enable(regs))
         do_hard_irq_enable();
 
     /* And finally process it */
@@ -515,7 +515,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
     }
 
     /* Conditionally hard-enable interrupts. */
-    if (should_hard_irq_enable()) {
+    if (should_hard_irq_enable(regs)) {
         /*
          * Ensure a positive value is written to the decrementer, or
          * else some CPUs will continue to take decrementer exceptions.
@@ -26,6 +26,7 @@
 #include <asm/firmware.h>
 #include <asm/kexec_ranges.h>
 #include <asm/crashdump-ppc64.h>
+#include <asm/mmzone.h>
 #include <asm/prom.h>
 
 struct umem_info {

@@ -989,10 +990,13 @@ unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image)
      * linux,drconf-usable-memory properties. Get an approximate on the
      * number of usable memory entries and use for FDT size estimation.
      */
-    usm_entries = ((memblock_end_of_DRAM() / drmem_lmb_size()) +
-                   (2 * (resource_size(&crashk_res) / drmem_lmb_size())));
-
-    extra_size = (unsigned int)(usm_entries * sizeof(u64));
+    if (drmem_lmb_size()) {
+        usm_entries = ((memory_hotplug_max() / drmem_lmb_size()) +
+                       (2 * (resource_size(&crashk_res) / drmem_lmb_size())));
+        extra_size = (unsigned int)(usm_entries * sizeof(u64));
+    } else {
+        extra_size = 0;
+    }
 
     /*
      * Get the number of CPU nodes in the current DT. This allows to
@@ -912,16 +912,15 @@ static int kvmppc_handle_debug(struct kvm_vcpu *vcpu)
 
 static void kvmppc_fill_pt_regs(struct pt_regs *regs)
 {
-    ulong r1, ip, msr, lr;
+    ulong r1, msr, lr;
 
     asm("mr %0, 1" : "=r"(r1));
     asm("mflr %0" : "=r"(lr));
     asm("mfmsr %0" : "=r"(msr));
-    asm("bl 1f; 1: mflr %0" : "=r"(ip));
 
     memset(regs, 0, sizeof(*regs));
     regs->gpr[1] = r1;
-    regs->nip = ip;
+    regs->nip = _THIS_IP_;
     regs->msr = msr;
     regs->link = lr;
 }
@@ -1012,7 +1012,7 @@ static void __init hash_init_partition_table(phys_addr_t hash_table,
 
 void hpt_clear_stress(void);
 static struct timer_list stress_hpt_timer;
-void stress_hpt_timer_fn(struct timer_list *timer)
+static void stress_hpt_timer_fn(struct timer_list *timer)
 {
     int next_cpu;
 
@@ -234,6 +234,14 @@ void radix__mark_rodata_ro(void)
     end = (unsigned long)__end_rodata;
 
     radix__change_memory_range(start, end, _PAGE_WRITE);
+
+    for (start = PAGE_OFFSET; start < (unsigned long)_stext; start += PAGE_SIZE) {
+        end = start + PAGE_SIZE;
+        if (overlaps_interrupt_vector_text(start, end))
+            radix__change_memory_range(start, end, _PAGE_WRITE);
+        else
+            break;
+    }
 }
 
 void radix__mark_initmem_nx(void)

@@ -262,6 +270,22 @@ print_mapping(unsigned long start, unsigned long end, unsigned long size, bool e
 static unsigned long next_boundary(unsigned long addr, unsigned long end)
 {
 #ifdef CONFIG_STRICT_KERNEL_RWX
+    unsigned long stext_phys;
+
+    stext_phys = __pa_symbol(_stext);
+
+    // Relocatable kernel running at non-zero real address
+    if (stext_phys != 0) {
+        // The end of interrupts code at zero is a rodata boundary
+        unsigned long end_intr = __pa_symbol(__end_interrupts) - stext_phys;
+        if (addr < end_intr)
+            return end_intr;
+
+        // Start of relocated kernel text is a rodata boundary
+        if (addr < stext_phys)
+            return stext_phys;
+    }
+
     if (addr < __pa_symbol(__srwx_boundary))
         return __pa_symbol(__srwx_boundary);
 #endif
@@ -14,6 +14,7 @@
 #include <asm/cputhreads.h>
 #include <asm/smp.h>
 #include <linux/string.h>
+#include <linux/spinlock.h>
 
 /* Nest IMC data structures and variables */
 
@@ -50,7 +51,7 @@ static int trace_imc_mem_size;
  * core and trace-imc
  */
 static struct imc_pmu_ref imc_global_refc = {
-    .lock = __MUTEX_INITIALIZER(imc_global_refc.lock),
+    .lock = __SPIN_LOCK_INITIALIZER(imc_global_refc.lock),
     .id = 0,
     .refc = 0,
 };

@@ -400,7 +401,7 @@ static int ppc_nest_imc_cpu_offline(unsigned int cpu)
                 get_hard_smp_processor_id(cpu));
     /*
      * If this is the last cpu in this chip then, skip the reference
-     * count mutex lock and make the reference count on this chip zero.
+     * count lock and make the reference count on this chip zero.
      */
     ref = get_nest_pmu_ref(cpu);
     if (!ref)

@@ -462,15 +463,15 @@ static void nest_imc_counters_release(struct perf_event *event)
     /*
      * See if we need to disable the nest PMU.
      * If no events are currently in use, then we have to take a
-     * mutex to ensure that we don't race with another task doing
+     * lock to ensure that we don't race with another task doing
      * enable or disable the nest counters.
      */
     ref = get_nest_pmu_ref(event->cpu);
     if (!ref)
         return;
 
-    /* Take the mutex lock for this node and then decrement the reference count */
-    mutex_lock(&ref->lock);
+    /* Take the lock for this node and then decrement the reference count */
+    spin_lock(&ref->lock);
     if (ref->refc == 0) {
         /*
          * The scenario where this is true is, when perf session is
@@ -482,7 +483,7 @@
          * an OPAL call to disable the engine in that node.
          *
          */
-        mutex_unlock(&ref->lock);
+        spin_unlock(&ref->lock);
         return;
     }
     ref->refc--;

@@ -490,7 +491,7 @@ static void nest_imc_counters_release(struct perf_event *event)
         rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
                     get_hard_smp_processor_id(event->cpu));
         if (rc) {
-            mutex_unlock(&ref->lock);
+            spin_unlock(&ref->lock);
             pr_err("nest-imc: Unable to stop the counters for core %d\n", node_id);
             return;
         }

@@ -498,7 +499,7 @@ static void nest_imc_counters_release(struct perf_event *event)
         WARN(1, "nest-imc: Invalid event reference count\n");
         ref->refc = 0;
     }
-    mutex_unlock(&ref->lock);
+    spin_unlock(&ref->lock);
 }
 
 static int nest_imc_event_init(struct perf_event *event)

@@ -557,26 +558,25 @@ static int nest_imc_event_init(struct perf_event *event)
 
     /*
      * Get the imc_pmu_ref struct for this node.
-     * Take the mutex lock and then increment the count of nest pmu events
-     * inited.
+     * Take the lock and then increment the count of nest pmu events inited.
      */
     ref = get_nest_pmu_ref(event->cpu);
     if (!ref)
         return -EINVAL;
 
-    mutex_lock(&ref->lock);
+    spin_lock(&ref->lock);
     if (ref->refc == 0) {
         rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_NEST,
                     get_hard_smp_processor_id(event->cpu));
         if (rc) {
-            mutex_unlock(&ref->lock);
+            spin_unlock(&ref->lock);
             pr_err("nest-imc: Unable to start the counters for node %d\n",
                     node_id);
             return rc;
         }
     }
     ++ref->refc;
-    mutex_unlock(&ref->lock);
+    spin_unlock(&ref->lock);
 
     event->destroy = nest_imc_counters_release;
     return 0;
@@ -612,9 +612,8 @@ static int core_imc_mem_init(int cpu, int size)
         return -ENOMEM;
     mem_info->vbase = page_address(page);
 
-    /* Init the mutex */
     core_imc_refc[core_id].id = core_id;
-    mutex_init(&core_imc_refc[core_id].lock);
+    spin_lock_init(&core_imc_refc[core_id].lock);
 
     rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_CORE,
                 __pa((void *)mem_info->vbase),

@@ -703,9 +702,8 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu)
         perf_pmu_migrate_context(&core_imc_pmu->pmu, cpu, ncpu);
     } else {
         /*
-         * If this is the last cpu in this core then, skip taking refernce
-         * count mutex lock for this core and directly zero "refc" for
-         * this core.
+         * If this is the last cpu in this core then skip taking reference
+         * count lock for this core and directly zero "refc" for this core.
          */
         opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
                     get_hard_smp_processor_id(cpu));

@@ -720,11 +718,11 @@
          * last cpu in this core and core-imc event running
          * in this cpu.
          */
-        mutex_lock(&imc_global_refc.lock);
+        spin_lock(&imc_global_refc.lock);
         if (imc_global_refc.id == IMC_DOMAIN_CORE)
             imc_global_refc.refc--;
 
-        mutex_unlock(&imc_global_refc.lock);
+        spin_unlock(&imc_global_refc.lock);
     }
     return 0;
 }

@@ -739,7 +737,7 @@ static int core_imc_pmu_cpumask_init(void)
 
 static void reset_global_refc(struct perf_event *event)
 {
-    mutex_lock(&imc_global_refc.lock);
+    spin_lock(&imc_global_refc.lock);
     imc_global_refc.refc--;
 
     /*
@@ -751,7 +749,7 @@ static void reset_global_refc(struct perf_event *event)
         imc_global_refc.refc = 0;
         imc_global_refc.id = 0;
     }
-    mutex_unlock(&imc_global_refc.lock);
+    spin_unlock(&imc_global_refc.lock);
 }
 
 static void core_imc_counters_release(struct perf_event *event)

@@ -764,17 +762,17 @@ static void core_imc_counters_release(struct perf_event *event)
     /*
      * See if we need to disable the IMC PMU.
      * If no events are currently in use, then we have to take a
-     * mutex to ensure that we don't race with another task doing
+     * lock to ensure that we don't race with another task doing
      * enable or disable the core counters.
      */
     core_id = event->cpu / threads_per_core;
 
-    /* Take the mutex lock and decrement the refernce count for this core */
+    /* Take the lock and decrement the refernce count for this core */
     ref = &core_imc_refc[core_id];
     if (!ref)
         return;
 
-    mutex_lock(&ref->lock);
+    spin_lock(&ref->lock);
     if (ref->refc == 0) {
         /*
          * The scenario where this is true is, when perf session is

@@ -786,7 +784,7 @@
          * an OPAL call to disable the engine in that core.
          *
          */
-        mutex_unlock(&ref->lock);
+        spin_unlock(&ref->lock);
         return;
     }
     ref->refc--;

@@ -794,7 +792,7 @@
         rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
                     get_hard_smp_processor_id(event->cpu));
         if (rc) {
-            mutex_unlock(&ref->lock);
+            spin_unlock(&ref->lock);
             pr_err("IMC: Unable to stop the counters for core %d\n", core_id);
             return;
         }

@@ -802,7 +800,7 @@
         WARN(1, "core-imc: Invalid event reference count\n");
         ref->refc = 0;
     }
-    mutex_unlock(&ref->lock);
+    spin_unlock(&ref->lock);
 
     reset_global_refc(event);
 }
@@ -840,7 +838,6 @@ static int core_imc_event_init(struct perf_event *event)
     if ((!pcmi->vbase))
         return -ENODEV;
 
-    /* Get the core_imc mutex for this core */
     ref = &core_imc_refc[core_id];
     if (!ref)
         return -EINVAL;

@@ -848,22 +845,22 @@
     /*
      * Core pmu units are enabled only when it is used.
      * See if this is triggered for the first time.
-     * If yes, take the mutex lock and enable the core counters.
+     * If yes, take the lock and enable the core counters.
      * If not, just increment the count in core_imc_refc struct.
      */
-    mutex_lock(&ref->lock);
+    spin_lock(&ref->lock);
     if (ref->refc == 0) {
         rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
                     get_hard_smp_processor_id(event->cpu));
         if (rc) {
-            mutex_unlock(&ref->lock);
+            spin_unlock(&ref->lock);
             pr_err("core-imc: Unable to start the counters for core %d\n",
                     core_id);
             return rc;
         }
     }
     ++ref->refc;
-    mutex_unlock(&ref->lock);
+    spin_unlock(&ref->lock);
 
     /*
      * Since the system can run either in accumulation or trace-mode

@@ -874,7 +871,7 @@
      * to know whether any other trace/thread imc
      * events are running.
      */
-    mutex_lock(&imc_global_refc.lock);
+    spin_lock(&imc_global_refc.lock);
     if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_CORE) {
         /*
          * No other trace/thread imc events are running in

@@ -883,10 +880,10 @@
         imc_global_refc.id = IMC_DOMAIN_CORE;
         imc_global_refc.refc++;
     } else {
-        mutex_unlock(&imc_global_refc.lock);
+        spin_unlock(&imc_global_refc.lock);
         return -EBUSY;
     }
-    mutex_unlock(&imc_global_refc.lock);
+    spin_unlock(&imc_global_refc.lock);
 
     event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK);
     event->destroy = core_imc_counters_release;
@@ -958,10 +955,10 @@ static int ppc_thread_imc_cpu_offline(unsigned int cpu)
     mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
 
     /* Reduce the refc if thread-imc event running on this cpu */
-    mutex_lock(&imc_global_refc.lock);
+    spin_lock(&imc_global_refc.lock);
     if (imc_global_refc.id == IMC_DOMAIN_THREAD)
         imc_global_refc.refc--;
-    mutex_unlock(&imc_global_refc.lock);
+    spin_unlock(&imc_global_refc.lock);
 
     return 0;
 }

@@ -1001,7 +998,7 @@ static int thread_imc_event_init(struct perf_event *event)
     if (!target)
         return -EINVAL;
 
-    mutex_lock(&imc_global_refc.lock);
+    spin_lock(&imc_global_refc.lock);
     /*
      * Check if any other trace/core imc events are running in the
      * system, if not set the global id to thread-imc.

@@ -1010,10 +1007,10 @@
         imc_global_refc.id = IMC_DOMAIN_THREAD;
         imc_global_refc.refc++;
     } else {
-        mutex_unlock(&imc_global_refc.lock);
+        spin_unlock(&imc_global_refc.lock);
         return -EBUSY;
     }
-    mutex_unlock(&imc_global_refc.lock);
+    spin_unlock(&imc_global_refc.lock);
 
     event->pmu->task_ctx_nr = perf_sw_context;
     event->destroy = reset_global_refc;
@@ -1135,25 +1132,25 @@ static int thread_imc_event_add(struct perf_event *event, int flags)
     /*
      * imc pmus are enabled only when it is used.
      * See if this is triggered for the first time.
-     * If yes, take the mutex lock and enable the counters.
+     * If yes, take the lock and enable the counters.
      * If not, just increment the count in ref count struct.
      */
     ref = &core_imc_refc[core_id];
     if (!ref)
         return -EINVAL;
 
-    mutex_lock(&ref->lock);
+    spin_lock(&ref->lock);
     if (ref->refc == 0) {
         if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
             get_hard_smp_processor_id(smp_processor_id()))) {
-            mutex_unlock(&ref->lock);
+            spin_unlock(&ref->lock);
             pr_err("thread-imc: Unable to start the counter\
             for core %d\n", core_id);
             return -EINVAL;
         }
     }
     ++ref->refc;
-    mutex_unlock(&ref->lock);
+    spin_unlock(&ref->lock);
     return 0;
 }
 

@@ -1170,12 +1167,12 @@
         return;
     }
 
-    mutex_lock(&ref->lock);
+    spin_lock(&ref->lock);
     ref->refc--;
     if (ref->refc == 0) {
         if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
             get_hard_smp_processor_id(smp_processor_id()))) {
-            mutex_unlock(&ref->lock);
+            spin_unlock(&ref->lock);
             pr_err("thread-imc: Unable to stop the counters\
             for core %d\n", core_id);
             return;

@@ -1183,7 +1180,7 @@
     } else if (ref->refc < 0) {
         ref->refc = 0;
     }
-    mutex_unlock(&ref->lock);
+    spin_unlock(&ref->lock);
 
     /* Set bit 0 of LDBAR to zero, to stop posting updates to memory */
     mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
@@ -1224,9 +1221,8 @@ static int trace_imc_mem_alloc(int cpu_id, int size)
         }
     }
 
-    /* Init the mutex, if not already */
     trace_imc_refc[core_id].id = core_id;
-    mutex_init(&trace_imc_refc[core_id].lock);
+    spin_lock_init(&trace_imc_refc[core_id].lock);
 
     mtspr(SPRN_LDBAR, 0);
     return 0;

@@ -1246,10 +1242,10 @@ static int ppc_trace_imc_cpu_offline(unsigned int cpu)
      * Reduce the refc if any trace-imc event running
      * on this cpu.
      */
-    mutex_lock(&imc_global_refc.lock);
+    spin_lock(&imc_global_refc.lock);
     if (imc_global_refc.id == IMC_DOMAIN_TRACE)
         imc_global_refc.refc--;
-    mutex_unlock(&imc_global_refc.lock);
+    spin_unlock(&imc_global_refc.lock);
 
     return 0;
 }

@@ -1371,17 +1367,17 @@ static int trace_imc_event_add(struct perf_event *event, int flags)
     }
 
     mtspr(SPRN_LDBAR, ldbar_value);
-    mutex_lock(&ref->lock);
+    spin_lock(&ref->lock);
     if (ref->refc == 0) {
         if (opal_imc_counters_start(OPAL_IMC_COUNTERS_TRACE,
                 get_hard_smp_processor_id(smp_processor_id()))) {
-            mutex_unlock(&ref->lock);
+            spin_unlock(&ref->lock);
             pr_err("trace-imc: Unable to start the counters for core %d\n", core_id);
             return -EINVAL;
         }
     }
     ++ref->refc;
-    mutex_unlock(&ref->lock);
+    spin_unlock(&ref->lock);
     return 0;
 }
 
@@ -1414,19 +1410,19 @@ static void trace_imc_event_del(struct perf_event *event, int flags)
         return;
     }
 
-    mutex_lock(&ref->lock);
+    spin_lock(&ref->lock);
     ref->refc--;
     if (ref->refc == 0) {
         if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_TRACE,
                 get_hard_smp_processor_id(smp_processor_id()))) {
-            mutex_unlock(&ref->lock);
+            spin_unlock(&ref->lock);
             pr_err("trace-imc: Unable to stop the counters for core %d\n", core_id);
             return;
         }
     } else if (ref->refc < 0) {
         ref->refc = 0;
     }
-    mutex_unlock(&ref->lock);
+    spin_unlock(&ref->lock);
 
     trace_imc_event_stop(event, flags);
 }

@@ -1448,7 +1444,7 @@ static int trace_imc_event_init(struct perf_event *event)
      * no other thread is running any core/thread imc
      * events
      */
-    mutex_lock(&imc_global_refc.lock);
+    spin_lock(&imc_global_refc.lock);
     if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_TRACE) {
         /*
          * No core/thread imc events are running in the

@@ -1457,10 +1453,10 @@
         imc_global_refc.id = IMC_DOMAIN_TRACE;
         imc_global_refc.refc++;
     } else {
-        mutex_unlock(&imc_global_refc.lock);
+        spin_unlock(&imc_global_refc.lock);
         return -EBUSY;
     }
-    mutex_unlock(&imc_global_refc.lock);
+    spin_unlock(&imc_global_refc.lock);
 
     event->hw.idx = -1;
 
@@ -1533,10 +1529,10 @@ static int init_nest_pmu_ref(void)
     i = 0;
     for_each_node(nid) {
         /*
-         * Mutex lock to avoid races while tracking the number of
+         * Take the lock to avoid races while tracking the number of
          * sessions using the chip's nest pmu units.
          */
-        mutex_init(&nest_imc_refc[i].lock);
+        spin_lock_init(&nest_imc_refc[i].lock);
 
         /*
          * Loop to init the "id" with the node_id. Variable "i" initialized to