OpenCloudOS-Kernel/arch/arm64/mm/context.c

// SPDX-License-Identifier: GPL-2.0-only
/*
* Based on arch/arm/mm/context.c
*
* Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
* Copyright (C) 2012 ARM Ltd.
*/
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>
static u32 asid_bits;
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation;
static unsigned long *asid_map;
static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
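/* CPUs whose TLB must be flushed at their next context switch (set on ASID rollover). */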
static cpumask_t tlb_flush_pending;
static unsigned long max_pinned_asids;
static unsigned long nr_pinned_asids;
static unsigned long *pinned_asid_map;
#define ASID_MASK (~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION (1UL << asid_bits)
#define NUM_USER_ASIDS ASID_FIRST_VERSION
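/*
 * mm->context.id packs the rollover generation in the upper bits and the
 * hardware ASID in the low asid_bits bits.
 */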
#define ctxid2asid(asid) ((asid) & ~ASID_MASK)
#define asid2ctxid(asid, genid) ((asid) | (genid))
/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
{
	u32 asid;
	int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),
						ID_AA64MMFR0_EL1_ASIDBITS_SHIFT);

	switch (fld) {
	default:
		pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
					smp_processor_id(), fld);
		fallthrough;
	case ID_AA64MMFR0_EL1_ASIDBITS_8:
		asid = 8;
		break;
	case ID_AA64MMFR0_EL1_ASIDBITS_16:
		asid = 16;
	}

	return asid;
}
/* Check if the current cpu's ASIDBits is compatible with asid_bits */
void verify_cpu_asid_bits(void)
{
	u32 asid = get_cpu_asid_bits();

	if (asid < asid_bits) {
		/*
		 * We cannot decrease the ASID size at runtime, so panic if we support
		 * fewer ASID bits than the boot CPU.
		 */
		pr_crit("CPU%d: smaller ASID size(%u) than boot CPU (%u)\n",
				smp_processor_id(), asid, asid_bits);
		cpu_panic_kernel();
	}
}
static void set_kpti_asid_bits(unsigned long *map)
{
	unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long);

	/*
	 * In case of KPTI kernel/user ASIDs are allocated in
	 * pairs, the bottom bit distinguishes the two: if it
	 * is set, then the ASID will map only userspace. Thus
	 * mark even as reserved for kernel.
	 */
	memset(map, 0xaa, len);
}
static void set_reserved_asid_bits(void)
{
	if (pinned_asid_map)
		bitmap_copy(asid_map, pinned_asid_map, NUM_USER_ASIDS);
	else if (arm64_kernel_unmapped_at_el0())
		set_kpti_asid_bits(asid_map);
	else
		bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
}
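/* Check whether the generation bits of @asid match the current asid_generation. */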
#define asid_gen_match(asid) \
	(!(((asid) ^ atomic64_read(&asid_generation)) >> asid_bits))
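/*
 * Called on ASID rollover, with cpu_asid_lock held: reset the ASID bitmap and
 * preserve the ASIDs currently active on each CPU as reserved.
 */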
static void flush_context(void)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	set_reserved_asid_bits();

	for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(ctxid2asid(asid), asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/*
	 * Queue a TLB invalidation for each CPU to perform on next
	 * context-switch
	 */
	cpumask_setall(&tlb_flush_pending);
}
static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}
static u64 new_context(struct mm_struct *mm)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = asid2ctxid(ctxid2asid(asid), generation);
		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;
		/*
		 * If it is pinned, we can keep using it. Note that reserved
		 * takes priority, because even if it is also pinned, we need to
		 * update the generation into the reserved_asids.
		 */
		if (refcount_read(&mm->context.pinned))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(ctxid2asid(asid), asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes. We
	 * always count from ASID #2 (index 1), as we use ASID #0 when setting
	 * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
	 * pairs.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid != NUM_USER_ASIDS)
		goto set_asid;

	/* We're out of ASIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
						 &asid_generation);
	flush_context();

	/* We have more ASIDs than CPUs, so this will always succeed */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);

set_asid:
	__set_bit(asid, asid_map);
	cur_idx = asid;
	return asid2ctxid(asid, generation);
}
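/*
 * Fast path: if this mm's ASID is still in the current generation, install it
 * with a relaxed cmpxchg on active_asids. Otherwise take cpu_asid_lock and
 * allocate a new context via new_context().
 */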
void check_and_switch_context(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int cpu;
	u64 asid, old_active_asid;
	if (system_supports_cnp())
		cpu_set_reserved_ttbr0();

	asid = atomic64_read(&mm->context.id);

	/*
	 * The memory ordering here is subtle.
	 * If our active_asids is non-zero and the ASID matches the current
	 * generation, then we update the active_asids entry with a relaxed
	 * cmpxchg. Racing with a concurrent rollover means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on the
	 *   lock. Taking the lock synchronises with the rollover and so
	 *   we are forced to see the updated generation.
	 *
	 * - We get a valid ASID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context will treat us as reserved
	 *   because atomic RmWs are totally ordered for a given location.
	 */
	old_active_asid = atomic64_read(this_cpu_ptr(&active_asids));
	if (old_active_asid && asid_gen_match(asid) &&
	    atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_asids),
				     old_active_asid, asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if (!asid_gen_match(asid)) {
		asid = new_context(mm);
		atomic64_set(&mm->context.id, asid);
	}
	cpu = smp_processor_id();
	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
		local_flush_tlb_all();
	atomic64_set(this_cpu_ptr(&active_asids), asid);
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:

	arm64_apply_bp_hardening();

	/*
	 * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
	 * emulating PAN.
	 */
	if (!system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}
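/*
 * Pin the ASID of @mm so that it survives rollover. Returns the hardware ASID
 * on success, or 0 if pinning is unavailable or the pin limit has been reached.
 */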
unsigned long arm64_mm_context_get(struct mm_struct *mm)
{
	unsigned long flags;
	u64 asid;

	if (!pinned_asid_map)
		return 0;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);

	asid = atomic64_read(&mm->context.id);

	if (refcount_inc_not_zero(&mm->context.pinned))
		goto out_unlock;

	if (nr_pinned_asids >= max_pinned_asids) {
		asid = 0;
		goto out_unlock;
	}

	if (!asid_gen_match(asid)) {
		/*
		 * We went through one or more rollover since that ASID was
		 * used. Ensure that it is still valid, or generate a new one.
		 */
		asid = new_context(mm);
		atomic64_set(&mm->context.id, asid);
	}

	nr_pinned_asids++;
	__set_bit(ctxid2asid(asid), pinned_asid_map);
	refcount_set(&mm->context.pinned, 1);

out_unlock:
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

	asid = ctxid2asid(asid);
	/* Set the equivalent of USER_ASID_BIT */
	if (asid && arm64_kernel_unmapped_at_el0())
		asid |= 1;

	return asid;
}
EXPORT_SYMBOL_GPL(arm64_mm_context_get);
void arm64_mm_context_put(struct mm_struct *mm)
{
	unsigned long flags;
	u64 asid = atomic64_read(&mm->context.id);

	if (!pinned_asid_map)
		return;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);

	if (refcount_dec_and_test(&mm->context.pinned)) {
		__clear_bit(ctxid2asid(asid), pinned_asid_map);
nr_pinned_asids--;
}
raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
}
EXPORT_SYMBOL_GPL(arm64_mm_context_put);
/* Errata workaround post TTBRx_EL1 update. */
asmlinkage void post_ttbr_update_workaround(void)
{
if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456))
return;
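/*
* On CPUs where the ARM64_WORKAROUND_CAVIUM_27456 capability is detected,
* the alternatives framework patches the three NOPs below into the
* I-cache invalidate + barrier sequence at boot; unaffected CPUs keep
* executing the NOPs.
*/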
asm(ALTERNATIVE("nop; nop; nop",
"ic iallu; dsb nsh; isb",
ARM64_WORKAROUND_CAVIUM_27456));
}
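/*
* Install the new TTBR0 tables and ASID for @mm. Since TCR_EL1.A1 is set,
* the ASID is taken from TTBR1, so TTBR1 is written first (followed by an
* ISB) before the new TTBR0 is installed; this avoids a window in which
* the incoming tables could be walked under the outgoing ASID. The final
* ISB completes the TTBR0 update before the errata workaround runs.
*/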
void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm)
{
unsigned long ttbr1 = read_sysreg(ttbr1_el1);
unsigned long asid = ASID(mm);
unsigned long ttbr0 = phys_to_ttbr(pgd_phys);
/* Skip CNP for the reserved ASID */
if (system_supports_cnp() && asid)
ttbr0 |= TTBR_CNP_BIT;
/* SW PAN needs a copy of the ASID in TTBR0 for entry */
if (IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN))
ttbr0 |= FIELD_PREP(TTBR_ASID_MASK, asid);
/* Set ASID in TTBR1 since TCR.A1 is set */
ttbr1 &= ~TTBR_ASID_MASK;
ttbr1 |= FIELD_PREP(TTBR_ASID_MASK, asid);
write_sysreg(ttbr1, ttbr1_el1);
isb();
write_sysreg(ttbr0, ttbr0_el1);
isb();
post_ttbr_update_workaround();
}
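/*
* Finalize the ASID limits once CPU capabilities are known. As a worked
* example (ASID width and CPU count assumed for illustration only): with
* 16-bit ASIDs and KPTI enabled, num_available_asids = 65536 / 2 = 32768,
* and on a 64-CPU system max_pinned_asids = 32768 - 64 - 2 = 32702: one
* ASID reserved per CPU, ASID #0 kept for init_mm, and one slot left free
* so that allocation after a rollover cannot fail.
*/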
static int asids_update_limit(void)
{
unsigned long num_available_asids = NUM_USER_ASIDS;
if (arm64_kernel_unmapped_at_el0()) {
num_available_asids /= 2;
if (pinned_asid_map)
set_kpti_asid_bits(pinned_asid_map);
}
/*
* Expect allocation after rollover to fail if we don't have at least
* one more ASID than CPUs. ASID #0 is reserved for init_mm.
*/
WARN_ON(num_available_asids - 1 <= num_possible_cpus());
pr_info("ASID allocator initialised with %lu entries\n",
num_available_asids);
/*
* There must always be an ASID available after rollover. Ensure that,
* even if every CPU holds a reserved ASID and the maximum number of
* ASIDs is pinned, there is still at least one empty slot in the ASID map.
*/
max_pinned_asids = num_available_asids - num_possible_cpus() - 2;
return 0;
}
arch_initcall(asids_update_limit);
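/*
* Allocate the ASID bitmaps. asids_init() runs at early_initcall time,
* before CPU capabilities are finalized, hence the conservative KPTI
* assumption below; asids_update_limit() then refines the limits at
* arch_initcall time. Note the asymmetry between the two bitmaps: a
* failed asid_map allocation is fatal, while a missing pinned_asid_map
* merely disables pinning (arm64_mm_context_put(), for example, returns
* immediately when the bitmap is NULL).
*/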
static int asids_init(void)
{
asid_bits = get_cpu_asid_bits();
atomic64_set(&asid_generation, ASID_FIRST_VERSION);
asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL);
if (!asid_map)
panic("Failed to allocate bitmap for %lu ASIDs\n",
NUM_USER_ASIDS);
pinned_asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL);
nr_pinned_asids = 0;
/*
* We cannot call set_reserved_asid_bits() here because CPU
* capabilities are not finalized yet, so it is safer to assume KPTI
* is enabled and reserve the kernel ASIDs from the beginning.
*/
if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
set_kpti_asid_bits(asid_map);
return 0;
}
early_initcall(asids_init);