arm64 fixes for -rc3
- Fix workaround for Cortex-A76 erratum #1286807 - Add workaround for AMU erratum #2457168 on Cortex-A510 - Drop reference to removed CONFIG_ARCH_RANDOM #define - Fix parsing of the "rodata=full" cmdline option - Fix a bunch of issues in the SME register state switching and sigframe code - Fix incorrect extraction of the CTR_EL0.CWG register field - Fix ACPI cache topology probing when the PPTT is not present - Trivial comment and whitespace fixes -----BEGIN PGP SIGNATURE----- iQFEBAABCgAuFiEEPxTL6PPUbjXGY88ct6xw3ITBYzQFAmMIr4YQHHdpbGxAa2Vy bmVsLm9yZwAKCRC3rHDchMFjNOpcB/9PQM3DtbFcCFJeetFxJYVSPHikHfEj4SHY H6NRNLA9JwwCqA1/RKbJQDpFS34JdL5UgtNuUFbg4zkH2aaeML7WKuRg396jv2ke GmHpOkNeIaae0NQes3MLZQf+Heh2oimRYwlPXJc23vhAIZagwby3yR/9POrxKm6p kamLIrmwU1mU7pwr7HEkRr1HOd/eOJ+q6P3UYrFlAgAp5PEfVGgcdjbHJ1gY1KWw HwR6s2yWQ71+Aug9wMS56TQszBedyuIOeKezAI+IbThF6t/kQlfJna+hqvPp0zEE mz1hss7ACpfDURdga35B1bE146aZ1SKEsXXaB853JI76K6D534Y9 =3oRd -----END PGP SIGNATURE----- Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux Pull arm64 fixes from Will Deacon: "A bumper crop of arm64 fixes for -rc3. The largest change is fixing our parsing of the 'rodata=full' command line option, which kstrtobool() started treating as 'rodata=false'. The fix actually makes the parsing of that option much less fragile and updates the documentation at the same time. We still have a boot issue pending when KASLR is disabled at compile time, but there's a fresh fix on the list which I'll send next week if it holds up to testing. Summary: - Fix workaround for Cortex-A76 erratum #1286807 - Add workaround for AMU erratum #2457168 on Cortex-A510 - Drop reference to removed CONFIG_ARCH_RANDOM #define - Fix parsing of the "rodata=full" cmdline option - Fix a bunch of issues in the SME register state switching and sigframe code - Fix incorrect extraction of the CTR_EL0.CWG register field - Fix ACPI cache topology probing when the PPTT is not present - Trivial comment and whitespace fixes" * tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: arm64/sme: Don't flush SVE register state when handling SME traps arm64/sme: Don't flush SVE register state when allocating SME storage arm64/signal: Flush FPSIMD register state when disabling streaming mode arm64/signal: Raise limit on stack frames arm64/cache: Fix cache_type_cwg() for register generation arm64/sysreg: Guard SYS_FIELD_ macros for asm arm64/sysreg: Directly include bitfield.h arm64: cacheinfo: Fix incorrect assignment of signed error value to unsigned fw_level arm64: errata: add detection for AMEVCNTR01 incrementing incorrectly arm64: fix rodata=full arm64: Fix comment typo docs/arm64: elf_hwcaps: unify newlines in HWCAP lists arm64: adjust KASLR relocation after ARCH_RANDOM removal arm64: Fix match_list for erratum 1286807 on Arm Cortex-A76
commit e022620b5d
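Background sketch (not part of the commit): the 'rodata=full' regression described above stems from kstrtobool() dispatching on the first character of its argument, so "full" parses as a perfectly valid 'false'. A minimal userspace illustration; toy_kstrtobool() below is a simplified stand-in for the kernel helper, not the kernel code itself:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's kstrtobool(): it dispatches on
 * the first character, so any string starting with 'f' or 'F' parses
 * successfully as false. (The real helper also handles "on"/"off".) */
static int toy_kstrtobool(const char *s, bool *res)
{
        if (!s)
                return -1;
        switch (s[0]) {
        case 'y': case 'Y': case 't': case 'T': case '1':
                *res = true;
                return 0;
        case 'n': case 'N': case 'f': case 'F': case '0':
                *res = false;
                return 0;
        default:
                return -1;
        }
}

int main(void)
{
        bool v;

        /* "full" starts with 'f', so it is accepted as false: this is
         * why a generic boolean parse of "rodata=full" silently became
         * "rodata=off" instead of being rejected. */
        if (!toy_kstrtobool("full", &v))
                printf("\"full\" parsed as %s\n", v ? "true" : "false");
        return 0;
}

The fix below sidesteps this: the new arch_parse_debug_rodata() hook (see the asm/setup.h and init/main.c hunks) consumes "full" before any boolean parsing, and the generic handler now only accepts an explicit "on" or "off".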
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -5331,6 +5331,8 @@
 	rodata=		[KNL]
 		on	Mark read-only kernel memory as read-only (default).
 		off	Leave read-only kernel memory writable for debugging.
+		full	Mark read-only kernel memory and aliases as read-only
+			[arm64]
 
 	rockchip.usb_uart
 			Enable the uart passthrough on the designated usb port
--- a/Documentation/arm64/elf_hwcaps.rst
+++ b/Documentation/arm64/elf_hwcaps.rst
@@ -242,44 +242,34 @@ HWCAP2_MTE3
     by Documentation/arm64/memory-tagging-extension.rst.
 
 HWCAP2_SME
-
     Functionality implied by ID_AA64PFR1_EL1.SME == 0b0001, as described
     by Documentation/arm64/sme.rst.
 
 HWCAP2_SME_I16I64
-
     Functionality implied by ID_AA64SMFR0_EL1.I16I64 == 0b1111.
 
 HWCAP2_SME_F64F64
-
     Functionality implied by ID_AA64SMFR0_EL1.F64F64 == 0b1.
 
 HWCAP2_SME_I8I32
-
     Functionality implied by ID_AA64SMFR0_EL1.I8I32 == 0b1111.
 
 HWCAP2_SME_F16F32
-
     Functionality implied by ID_AA64SMFR0_EL1.F16F32 == 0b1.
 
 HWCAP2_SME_B16F32
-
     Functionality implied by ID_AA64SMFR0_EL1.B16F32 == 0b1.
 
 HWCAP2_SME_F32F32
-
     Functionality implied by ID_AA64SMFR0_EL1.F32F32 == 0b1.
 
 HWCAP2_SME_FA64
-
     Functionality implied by ID_AA64SMFR0_EL1.FA64 == 0b1.
 
 HWCAP2_WFXT
-
     Functionality implied by ID_AA64ISAR2_EL1.WFXT == 0b0010.
 
 HWCAP2_EBF16
-
     Functionality implied by ID_AA64ISAR1_EL1.BF16 == 0b0010.
 
 4. Unused AT_HWCAP bits
--- a/Documentation/arm64/silicon-errata.rst
+++ b/Documentation/arm64/silicon-errata.rst
@@ -52,6 +52,8 @@ stable kernels.
 | Allwinner      | A64/R18         | UNKNOWN1        | SUN50I_ERRATUM_UNKNOWN1     |
 +----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A510     | #2457168        | ARM64_ERRATUM_2457168       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A510     | #2064142        | ARM64_ERRATUM_2064142       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A510     | #2038923        | ARM64_ERRATUM_2038923       |
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -917,6 +917,23 @@ config ARM64_ERRATUM_1902691
 
 	  If unsure, say Y.
 
+config ARM64_ERRATUM_2457168
+	bool "Cortex-A510: 2457168: workaround for AMEVCNTR01 incrementing incorrectly"
+	depends on ARM64_AMU_EXTN
+	default y
+	help
+	  This option adds the workaround for ARM Cortex-A510 erratum 2457168.
+
+	  The AMU counter AMEVCNTR01 (constant counter) should increment at the same rate
+	  as the system counter. On affected Cortex-A510 cores AMEVCNTR01 increments
+	  incorrectly giving a significantly higher output value.
+
+	  Work around this problem by returning 0 when reading the affected counter in
+	  key locations that results in disabling all users of this counter. This effect
+	  is the same to firmware disabling affected counters.
+
+	  If unsure, say Y.
+
 config CAVIUM_ERRATUM_22375
 	bool "Cavium erratum 22375, 24313"
 	default y
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -71,7 +71,7 @@ static __always_inline int icache_is_vpipt(void)
 
 static inline u32 cache_type_cwg(void)
 {
-	return (read_cpuid_cachetype() >> CTR_EL0_CWG_SHIFT) & CTR_EL0_CWG_MASK;
+	return SYS_FIELD_GET(CTR_EL0, CWG, read_cpuid_cachetype());
}
 
 #define __read_mostly __section(".data..read_mostly")
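Why the old cache_type_cwg() was wrong (an illustrative sketch, not part of the commit): after the conversion to the generated sysreg scheme, CTR_EL0_CWG_MASK is defined already shifted into place for use with FIELD_GET(), so the old shift-then-mask extraction always returned 0. The field_get() helper below is a hypothetical reimplementation of the semantics of FIELD_GET() from <linux/bitfield.h>:

#include <stdint.h>
#include <stdio.h>

/* CTR_EL0.CWG lives in bits [27:24]; the generated-sysreg headers
 * define the *shifted* mask, as FIELD_GET() expects. */
#define CTR_EL0_CWG_SHIFT	24
#define CTR_EL0_CWG_MASK	(0xfULL << CTR_EL0_CWG_SHIFT)

/* Hypothetical reimplementation of FIELD_GET() semantics: mask the
 * value, then shift the field down by the mask's trailing zero count. */
static inline uint64_t field_get(uint64_t mask, uint64_t val)
{
        return (val & mask) >> __builtin_ctzll(mask);
}

int main(void)
{
        uint64_t ctr = 0x4ULL << CTR_EL0_CWG_SHIFT;	/* CWG == 4 */

        /* Buggy pattern from the old cache_type_cwg(): shifting first
         * and then applying an already-shifted mask yields 0 here. */
        uint64_t buggy = (ctr >> CTR_EL0_CWG_SHIFT) & CTR_EL0_CWG_MASK;
        uint64_t fixed = field_get(CTR_EL0_CWG_MASK, ctr);

        printf("buggy=%llu fixed=%llu\n",
               (unsigned long long)buggy, (unsigned long long)fixed);
        return 0;
}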
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -153,7 +153,7 @@ struct vl_info {
 
 #ifdef CONFIG_ARM64_SVE
 
-extern void sve_alloc(struct task_struct *task);
+extern void sve_alloc(struct task_struct *task, bool flush);
 extern void fpsimd_release_task(struct task_struct *task);
 extern void fpsimd_sync_to_sve(struct task_struct *task);
 extern void fpsimd_force_sync_to_sve(struct task_struct *task);
@@ -256,7 +256,7 @@ size_t sve_state_size(struct task_struct const *task);
 
 #else /* ! CONFIG_ARM64_SVE */
 
-static inline void sve_alloc(struct task_struct *task) { }
+static inline void sve_alloc(struct task_struct *task, bool flush) { }
 static inline void fpsimd_release_task(struct task_struct *task) { }
 static inline void sve_sync_to_fpsimd(struct task_struct *task) { }
 static inline void sve_sync_from_fpsimd_zeropad(struct task_struct *task) { }
--- a/arch/arm64/include/asm/setup.h
+++ b/arch/arm64/include/asm/setup.h
@@ -3,6 +3,8 @@
 #ifndef __ARM64_ASM_SETUP_H
 #define __ARM64_ASM_SETUP_H
 
+#include <linux/string.h>
+
 #include <uapi/asm/setup.h>
 
 void *get_early_fdt_ptr(void);
@@ -14,4 +16,19 @@ void early_fdt_map(u64 dt_phys);
 extern phys_addr_t __fdt_pointer __initdata;
 extern u64 __cacheline_aligned boot_args[4];
 
+static inline bool arch_parse_debug_rodata(char *arg)
+{
+	extern bool rodata_enabled;
+	extern bool rodata_full;
+
+	if (arg && !strcmp(arg, "full")) {
+		rodata_enabled = true;
+		rodata_full = true;
+		return true;
+	}
+
+	return false;
+}
+#define arch_parse_debug_rodata arch_parse_debug_rodata
+
 #endif
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -1116,6 +1116,7 @@
 
 #else
 
+#include <linux/bitfield.h>
 #include <linux/build_bug.h>
 #include <linux/types.h>
 #include <asm/alternative.h>
@@ -1209,8 +1210,6 @@
 	par; \
 })
 
-#endif
-
 #define SYS_FIELD_GET(reg, field, val) \
 		 FIELD_GET(reg##_##field##_MASK, val)
 
@@ -1220,4 +1219,6 @@
 #define SYS_FIELD_PREP_ENUM(reg, field, val) \
 		 FIELD_PREP(reg##_##field##_MASK, reg##_##field##_##val)
 
+#endif
+
 #endif /* __ASM_SYSREG_H */
--- a/arch/arm64/kernel/cacheinfo.c
+++ b/arch/arm64/kernel/cacheinfo.c
@@ -45,7 +45,8 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
 
 int init_cache_level(unsigned int cpu)
 {
-	unsigned int ctype, level, leaves, fw_level;
+	unsigned int ctype, level, leaves;
+	int fw_level;
 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
 
 	for (level = 1, leaves = 0; level <= MAX_CACHE_LEVEL; level++) {
@@ -63,6 +64,9 @@ int init_cache_level(unsigned int cpu)
 	else
 		fw_level = acpi_find_last_cache_level(cpu);
 
+	if (fw_level < 0)
+		return fw_level;
+
 	if (level < fw_level) {
 		/*
 		 * some external caches not specified in CLIDR_EL1
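The cacheinfo.c fix matters because acpi_find_last_cache_level() can return a negative error code (for example when no PPTT is present), and storing that in an unsigned variable makes the fw_level < 0 check impossible. An illustrative sketch of the pitfall; find_level() is a hypothetical stand-in for the ACPI probe:

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for a firmware probe that fails with -ENOENT,
 * as acpi_find_last_cache_level() can when there is no PPTT. */
static int find_level(void)
{
        return -ENOENT;
}

int main(void)
{
        unsigned int fw_level_unsigned = find_level();
        int fw_level = find_level();

        /* The unsigned copy can never test negative (compilers even
         * warn that the comparison is always false); -ENOENT wraps to
         * a huge positive "level" instead of being caught as an error. */
        if (fw_level_unsigned < 0)
                puts("unreachable");
        printf("unsigned sees level %u\n", fw_level_unsigned);

        /* With a signed variable the error check works as intended. */
        if (fw_level < 0)
                printf("signed sees error %d\n", fw_level);
        return 0;
}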
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -208,6 +208,8 @@ static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
 #ifdef CONFIG_ARM64_ERRATUM_1286807
 	{
 		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
+	},
+	{
 		/* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
 		ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
 	},
@@ -654,6 +656,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
 	},
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_2457168
+	{
+		.desc = "ARM erratum 2457168",
+		.capability = ARM64_WORKAROUND_2457168,
+		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+
+		/* Cortex-A510 r0p0-r1p1 */
+		CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1)
+	},
+#endif
 #ifdef CONFIG_ARM64_ERRATUM_2038923
 	{
 		.desc = "ARM erratum 2038923",
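The first hunk above fixes a classic designated-initializer pitfall: ERRATA_MIDR_RANGE() expands to designated initializers, and two expansions inside a single braced entry mean the later one silently overrides the earlier, so the Cortex-A76 range was never matched. An illustrative sketch with simplified stand-ins for the kernel's structures and macros:

#include <stdio.h>

/* Illustrative stand-in for one match-list entry; the kernel's
 * arm64_cpu_capabilities is more involved. */
struct midr_entry {
        unsigned int model;
        unsigned int rv_min, rv_max;
};

/* Illustrative stand-in for ERRATA_MIDR_RANGE(): it emits designated
 * initializers for the fields of one entry. */
#define MIDR_RANGE(m, lo, hi)	.model = (m), .rv_min = (lo), .rv_max = (hi)

int main(void)
{
        /* Two MIDR_RANGE() uses inside ONE braced entry: the second set
         * of designated initializers silently overrides the first (gcc
         * warns with -Woverride-init), so model 0x76 is lost. */
        struct midr_entry broken[] = {
                {
                        MIDR_RANGE(0x76, 0, 3),
                        MIDR_RANGE(0x4c, 1, 2),
                },
        };

        /* The fix: one entry per range. */
        struct midr_entry fixed[] = {
                { MIDR_RANGE(0x76, 0, 3) },
                { MIDR_RANGE(0x4c, 1, 2) },
        };

        printf("broken[0].model = 0x%x (0x76 was overridden)\n",
               broken[0].model);
        printf("fixed[0].model  = 0x%x\n", fixed[0].model);
        return 0;
}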
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1870,7 +1870,10 @@ static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap)
 		pr_info("detected CPU%d: Activity Monitors Unit (AMU)\n",
 			smp_processor_id());
 		cpumask_set_cpu(smp_processor_id(), &amu_cpus);
-		update_freq_counters_refs();
+
+		/* 0 reference values signal broken/disabled counters */
+		if (!this_cpu_has_cap(ARM64_WORKAROUND_2457168))
+			update_freq_counters_refs();
 	}
 }
 
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -502,7 +502,7 @@ tsk	.req	x28		// current thread_info
 SYM_CODE_START(vectors)
 	kernel_ventry	1, t, 64, sync		// Synchronous EL1t
 	kernel_ventry	1, t, 64, irq		// IRQ EL1t
-	kernel_ventry	1, t, 64, fiq		// FIQ EL1h
+	kernel_ventry	1, t, 64, fiq		// FIQ EL1t
 	kernel_ventry	1, t, 64, error		// Error EL1t
 
 	kernel_ventry	1, h, 64, sync		// Synchronous EL1h
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -715,10 +715,12 @@ size_t sve_state_size(struct task_struct const *task)
  * do_sve_acc() case, there is no ABI requirement to hide stale data
  * written previously be task.
  */
-void sve_alloc(struct task_struct *task)
+void sve_alloc(struct task_struct *task, bool flush)
 {
 	if (task->thread.sve_state) {
-		memset(task->thread.sve_state, 0, sve_state_size(task));
+		if (flush)
+			memset(task->thread.sve_state, 0,
+			       sve_state_size(task));
 		return;
 	}
 
@@ -1388,7 +1390,7 @@ void do_sve_acc(unsigned long esr, struct pt_regs *regs)
 		return;
 	}
 
-	sve_alloc(current);
+	sve_alloc(current, true);
 	if (!current->thread.sve_state) {
 		force_sig(SIGKILL);
 		return;
@@ -1439,7 +1441,7 @@ void do_sme_acc(unsigned long esr, struct pt_regs *regs)
 		return;
 	}
 
-	sve_alloc(current);
+	sve_alloc(current, false);
 	sme_alloc(current);
 	if (!current->thread.sve_state || !current->thread.za_state) {
 		force_sig(SIGKILL);
@@ -1460,6 +1462,17 @@ void do_sme_acc(unsigned long esr, struct pt_regs *regs)
 		fpsimd_bind_task_to_cpu();
 	}
 
+	/*
+	 * If SVE was not already active initialise the SVE registers,
+	 * any non-shared state between the streaming and regular SVE
+	 * registers is architecturally guaranteed to be zeroed when
+	 * we enter streaming mode. We do not need to initialize ZA
+	 * since ZA must be disabled at this point and enabling ZA is
+	 * architecturally defined to zero ZA.
+	 */
+	if (system_supports_sve() && !test_thread_flag(TIF_SVE))
+		sve_init_regs();
+
 	put_cpu_fpsimd_context();
 }
--- a/arch/arm64/kernel/pi/kaslr_early.c
+++ b/arch/arm64/kernel/pi/kaslr_early.c
@@ -94,11 +94,9 @@ asmlinkage u64 kaslr_early_init(void *fdt)
 
 	seed = get_kaslr_seed(fdt);
 	if (!seed) {
-#ifdef CONFIG_ARCH_RANDOM
-		if (!__early_cpu_has_rndr() ||
-		    !__arm64_rndr((unsigned long *)&seed))
-#endif
-			return 0;
+		if (!__early_cpu_has_rndr() ||
+		    !__arm64_rndr((unsigned long *)&seed))
+			return 0;
 	}
 
 	/*
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -882,7 +882,7 @@ static int sve_set_common(struct task_struct *target,
 		 * state and ensure there's storage.
 		 */
 		if (target->thread.svcr != old_svcr)
-			sve_alloc(target);
+			sve_alloc(target, true);
 	}
 
 	/* Registers: FPSIMD-only case */
@@ -912,7 +912,7 @@ static int sve_set_common(struct task_struct *target,
 		goto out;
 	}
 
-	sve_alloc(target);
+	sve_alloc(target, true);
 	if (!target->thread.sve_state) {
 		ret = -ENOMEM;
 		clear_tsk_thread_flag(target, TIF_SVE);
@@ -1082,7 +1082,7 @@ static int za_set(struct task_struct *target,
 
 	/* Ensure there is some SVE storage for streaming mode */
 	if (!target->thread.sve_state) {
-		sve_alloc(target);
+		sve_alloc(target, false);
 		if (!target->thread.sve_state) {
 			clear_thread_flag(TIF_SME);
 			ret = -ENOMEM;
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -91,7 +91,7 @@ static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
  * not taken into account. This limit is not a guarantee and is
  * NOT ABI.
  */
-#define SIGFRAME_MAXSZ SZ_64K
+#define SIGFRAME_MAXSZ SZ_256K
 
 static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
 			    unsigned long *offset, size_t size, bool extend)
@@ -310,7 +310,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
 	fpsimd_flush_task_state(current);
 	/* From now, fpsimd_thread_switch() won't touch thread.sve_state */
 
-	sve_alloc(current);
+	sve_alloc(current, true);
 	if (!current->thread.sve_state) {
 		clear_thread_flag(TIF_SVE);
 		return -ENOMEM;
@@ -926,6 +926,16 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
 
 	/* Signal handlers are invoked with ZA and streaming mode disabled */
 	if (system_supports_sme()) {
+		/*
+		 * If we were in streaming mode the saved register
+		 * state was SVE but we will exit SM and use the
+		 * FPSIMD register state - flush the saved FPSIMD
+		 * register state in case it gets loaded.
+		 */
+		if (current->thread.svcr & SVCR_SM_MASK)
+			memset(&current->thread.uw.fpsimd_state, 0,
+			       sizeof(current->thread.uw.fpsimd_state));
+
 		current->thread.svcr &= ~(SVCR_ZA_MASK |
 					  SVCR_SM_MASK);
 		sme_smstop();
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -296,12 +296,25 @@ core_initcall(init_amu_fie);
 
 static void cpu_read_corecnt(void *val)
 {
+	/*
+	 * A value of 0 can be returned if the current CPU does not support AMUs
+	 * or if the counter is disabled for this CPU. A return value of 0 at
+	 * counter read is properly handled as an error case by the users of the
+	 * counter.
+	 */
 	*(u64 *)val = read_corecnt();
 }
 
 static void cpu_read_constcnt(void *val)
 {
-	*(u64 *)val = read_constcnt();
+	/*
+	 * Return 0 if the current CPU is affected by erratum 2457168. A value
+	 * of 0 is also returned if the current CPU does not support AMUs or if
+	 * the counter is disabled. A return value of 0 at counter read is
+	 * properly handled as an error case by the users of the counter.
+	 */
+	*(u64 *)val = this_cpu_has_cap(ARM64_WORKAROUND_2457168) ?
+		      0UL : read_constcnt();
 }
 
 static inline
@@ -328,7 +341,22 @@ int counters_read_on_cpu(int cpu, smp_call_func_t func, u64 *val)
  */
 bool cpc_ffh_supported(void)
 {
-	return freq_counters_valid(get_cpu_with_amu_feat());
+	int cpu = get_cpu_with_amu_feat();
+
+	/*
+	 * FFH is considered supported if there is at least one present CPU that
+	 * supports AMUs. Using FFH to read core and reference counters for CPUs
+	 * that do not support AMUs, have counters disabled or that are affected
+	 * by errata, will result in a return value of 0.
+	 *
+	 * This is done to allow any enabled and valid counters to be read
+	 * through FFH, knowing that potentially returning 0 as counter value is
+	 * properly handled by the users of these counters.
+	 */
+	if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask))
+		return false;
+
+	return true;
 }
 
 int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val)
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -642,24 +642,6 @@ static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
 	vm_area_add_early(vma);
 }
 
-static int __init parse_rodata(char *arg)
-{
-	int ret = strtobool(arg, &rodata_enabled);
-	if (!ret) {
-		rodata_full = false;
-		return 0;
-	}
-
-	/* permit 'full' in addition to boolean options */
-	if (strcmp(arg, "full"))
-		return -EINVAL;
-
-	rodata_enabled = true;
-	rodata_full = true;
-	return 0;
-}
-early_param("rodata", parse_rodata);
-
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 static int __init map_entry_trampoline(void)
 {
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -67,6 +67,7 @@ WORKAROUND_1902691
 WORKAROUND_2038923
 WORKAROUND_2064142
 WORKAROUND_2077057
+WORKAROUND_2457168
 WORKAROUND_TRBE_OVERWRITE_FILL_MODE
 WORKAROUND_TSB_FLUSH_FAILURE
 WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
--- a/init/main.c
+++ b/init/main.c
@@ -1446,13 +1446,25 @@ static noinline void __init kernel_init_freeable(void);
 
 #if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_STRICT_MODULE_RWX)
 bool rodata_enabled __ro_after_init = true;
+
+#ifndef arch_parse_debug_rodata
+static inline bool arch_parse_debug_rodata(char *str) { return false; }
+#endif
+
 static int __init set_debug_rodata(char *str)
 {
-	if (strtobool(str, &rodata_enabled))
+	if (arch_parse_debug_rodata(str))
+		return 0;
+
+	if (str && !strcmp(str, "on"))
+		rodata_enabled = true;
+	else if (str && !strcmp(str, "off"))
+		rodata_enabled = false;
+	else
 		pr_warn("Invalid option string for rodata: '%s'\n", str);
-	return 1;
+	return 0;
 }
-__setup("rodata=", set_debug_rodata);
+early_param("rodata", set_debug_rodata);
 #endif
 
 #ifdef CONFIG_STRICT_KERNEL_RWX