Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull more arm64 fixes from Will Deacon:

 - fix application of read-only permissions to kernel section mappings

 - sanitise reported ESR values for signals delivered on a kernel
   address

 - ensure tishift GCC helpers are exported to modules

 - fix inline asm constraints for some LSE atomics

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: Make sure permission updates happen for pmd/pud
  arm64: fault: Don't leak data in ESR context for user fault on kernel VA
  arm64: export tishift functions to modules
  arm64: lse: Add early clobbers to some input/output asm operands
commit 62d18ecfa6 (Linus Torvalds, 2018-05-25 09:35:11 -07:00)
5 changed files with 83 additions and 31 deletions

arch/arm64/include/asm/atomic_lse.h

@@ -117,7 +117,7 @@ static inline void atomic_and(int i, atomic_t *v)
 	/* LSE atomics */
 	"	mvn	%w[i], %w[i]\n"
 	"	stclr	%w[i], %[v]")
-	: [i] "+r" (w0), [v] "+Q" (v->counter)
+	: [i] "+&r" (w0), [v] "+Q" (v->counter)
 	: "r" (x1)
 	: __LL_SC_CLOBBERS);
 }
@@ -135,7 +135,7 @@ static inline int atomic_fetch_and##name(int i, atomic_t *v)	\
 	/* LSE atomics */	\
 	"	mvn	%w[i], %w[i]\n"	\
 	"	ldclr" #mb "	%w[i], %w[i], %[v]")	\
-	: [i] "+r" (w0), [v] "+Q" (v->counter)	\
+	: [i] "+&r" (w0), [v] "+Q" (v->counter)	\
 	: "r" (x1)	\
 	: __LL_SC_CLOBBERS, ##cl);	\
 	\
@@ -161,7 +161,7 @@ static inline void atomic_sub(int i, atomic_t *v)
 	/* LSE atomics */
 	"	neg	%w[i], %w[i]\n"
 	"	stadd	%w[i], %[v]")
-	: [i] "+r" (w0), [v] "+Q" (v->counter)
+	: [i] "+&r" (w0), [v] "+Q" (v->counter)
 	: "r" (x1)
 	: __LL_SC_CLOBBERS);
 }
@@ -180,7 +180,7 @@ static inline int atomic_sub_return##name(int i, atomic_t *v)	\
 	"	neg	%w[i], %w[i]\n"	\
 	"	ldadd" #mb "	%w[i], w30, %[v]\n"	\
 	"	add	%w[i], %w[i], w30")	\
-	: [i] "+r" (w0), [v] "+Q" (v->counter)	\
+	: [i] "+&r" (w0), [v] "+Q" (v->counter)	\
 	: "r" (x1)	\
 	: __LL_SC_CLOBBERS , ##cl);	\
 	\
@@ -207,7 +207,7 @@ static inline int atomic_fetch_sub##name(int i, atomic_t *v)	\
 	/* LSE atomics */	\
 	"	neg	%w[i], %w[i]\n"	\
 	"	ldadd" #mb "	%w[i], %w[i], %[v]")	\
-	: [i] "+r" (w0), [v] "+Q" (v->counter)	\
+	: [i] "+&r" (w0), [v] "+Q" (v->counter)	\
 	: "r" (x1)	\
 	: __LL_SC_CLOBBERS, ##cl);	\
 	\
@@ -314,7 +314,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
 	/* LSE atomics */
 	"	mvn	%[i], %[i]\n"
 	"	stclr	%[i], %[v]")
-	: [i] "+r" (x0), [v] "+Q" (v->counter)
+	: [i] "+&r" (x0), [v] "+Q" (v->counter)
 	: "r" (x1)
 	: __LL_SC_CLOBBERS);
 }
@@ -332,7 +332,7 @@ static inline long atomic64_fetch_and##name(long i, atomic64_t *v)	\
 	/* LSE atomics */	\
 	"	mvn	%[i], %[i]\n"	\
 	"	ldclr" #mb "	%[i], %[i], %[v]")	\
-	: [i] "+r" (x0), [v] "+Q" (v->counter)	\
+	: [i] "+&r" (x0), [v] "+Q" (v->counter)	\
 	: "r" (x1)	\
 	: __LL_SC_CLOBBERS, ##cl);	\
 	\
@@ -358,7 +358,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
 	/* LSE atomics */
 	"	neg	%[i], %[i]\n"
 	"	stadd	%[i], %[v]")
-	: [i] "+r" (x0), [v] "+Q" (v->counter)
+	: [i] "+&r" (x0), [v] "+Q" (v->counter)
 	: "r" (x1)
 	: __LL_SC_CLOBBERS);
 }
@@ -377,7 +377,7 @@ static inline long atomic64_sub_return##name(long i, atomic64_t *v)	\
 	"	neg	%[i], %[i]\n"	\
 	"	ldadd" #mb "	%[i], x30, %[v]\n"	\
 	"	add	%[i], %[i], x30")	\
-	: [i] "+r" (x0), [v] "+Q" (v->counter)	\
+	: [i] "+&r" (x0), [v] "+Q" (v->counter)	\
 	: "r" (x1)	\
 	: __LL_SC_CLOBBERS, ##cl);	\
 	\
@@ -404,7 +404,7 @@ static inline long atomic64_fetch_sub##name(long i, atomic64_t *v)	\
 	/* LSE atomics */	\
 	"	neg	%[i], %[i]\n"	\
 	"	ldadd" #mb "	%[i], %[i], %[v]")	\
-	: [i] "+r" (x0), [v] "+Q" (v->counter)	\
+	: [i] "+&r" (x0), [v] "+Q" (v->counter)	\
 	: "r" (x1)	\
 	: __LL_SC_CLOBBERS, ##cl);	\
 	\
@@ -435,7 +435,7 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
 	"	sub	x30, x30, %[ret]\n"
 	"	cbnz	x30, 1b\n"
 	"2:")
-	: [ret] "+r" (x0), [v] "+Q" (v->counter)
+	: [ret] "+&r" (x0), [v] "+Q" (v->counter)
 	:
 	: __LL_SC_CLOBBERS, "cc", "memory");
 
@@ -516,7 +516,7 @@ static inline long __cmpxchg_double##name(unsigned long old1,	\
 	"	eor	%[old1], %[old1], %[oldval1]\n"	\
 	"	eor	%[old2], %[old2], %[oldval2]\n"	\
 	"	orr	%[old1], %[old1], %[old2]")	\
-	: [old1] "+r" (x0), [old2] "+r" (x1),	\
+	: [old1] "+&r" (x0), [old2] "+&r" (x1),	\
 	  [v] "+Q" (*(unsigned long *)ptr)	\
 	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),	\
 	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)	\

arch/arm64/kernel/arm64ksyms.c

@@ -75,3 +75,11 @@ NOKPROBE_SYMBOL(_mcount);
 /* arm-smccc */
 EXPORT_SYMBOL(__arm_smccc_smc);
 EXPORT_SYMBOL(__arm_smccc_hvc);
+
+/* tishift.S */
+extern long long __ashlti3(long long a, int b);
+EXPORT_SYMBOL(__ashlti3);
+extern long long __ashrti3(long long a, int b);
+EXPORT_SYMBOL(__ashrti3);
+extern long long __lshrti3(long long a, int b);
+EXPORT_SYMBOL(__lshrti3);
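
__ashlti3, __ashrti3 and __lshrti3 are libgcc-style helpers that the compiler may call behind the programmer's back whenever code shifts a 128-bit (TImode) value by a variable amount; the arm64 kernel links without libgcc, so its own copies live in arch/arm64/lib/tishift.S and must be exported before loadable modules can use them. A hypothetical module fragment that would trip over missing exports (all names here are illustrative, not from the patch):

#include <linux/init.h>
#include <linux/module.h>

static unsigned int n = 13;
module_param(n, uint, 0);	/* keeps the shift count non-constant */

static int __init tishift_demo_init(void)
{
	unsigned __int128 acc = ~(unsigned __int128)0;

	/*
	 * Variable-count shifts of a 128-bit value may be compiled into
	 * calls to the tishift helpers. Without the exports above, loading
	 * this module would fail with e.g. "Unknown symbol __lshrti3".
	 */
	acc >>= n;		/* may become a call to __lshrti3 */
	acc <<= (n + 1);	/* may become a call to __ashlti3 */

	pr_info("tishift_demo: low 64 bits: %#llx\n", (unsigned long long)acc);
	return 0;
}

static void __exit tishift_demo_exit(void)
{
}

module_init(tishift_demo_init);
module_exit(tishift_demo_exit);
MODULE_LICENSE("GPL");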

arch/arm64/lib/tishift.S

@@ -1,17 +1,6 @@
-/*
- * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ * Copyright (C) 2017-2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
  */
 
 #include <linux/linkage.h>

arch/arm64/mm/fault.c

@@ -293,6 +293,57 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
 static void __do_user_fault(struct siginfo *info, unsigned int esr)
 {
 	current->thread.fault_address = (unsigned long)info->si_addr;
+
+	/*
+	 * If the faulting address is in the kernel, we must sanitize the ESR.
+	 * From userspace's point of view, kernel-only mappings don't exist
+	 * at all, so we report them as level 0 translation faults.
+	 * (This is not quite the way that "no mapping there at all" behaves:
+	 * an alignment fault not caused by the memory type would take
+	 * precedence over translation fault for a real access to empty
+	 * space. Unfortunately we can't easily distinguish "alignment fault
+	 * not caused by memory type" from "alignment fault caused by memory
+	 * type", so we ignore this wrinkle and just return the translation
+	 * fault.)
+	 */
+	if (current->thread.fault_address >= TASK_SIZE) {
+		switch (ESR_ELx_EC(esr)) {
+		case ESR_ELx_EC_DABT_LOW:
+			/*
+			 * These bits provide only information about the
+			 * faulting instruction, which userspace knows already.
+			 * We explicitly clear bits which are architecturally
+			 * RES0 in case they are given meanings in future.
+			 * We always report the ESR as if the fault was taken
+			 * to EL1 and so ISV and the bits in ISS[23:14] are
+			 * clear. (In fact it always will be a fault to EL1.)
+			 */
+			esr &= ESR_ELx_EC_MASK | ESR_ELx_IL |
+				ESR_ELx_CM | ESR_ELx_WNR;
+			esr |= ESR_ELx_FSC_FAULT;
+			break;
+		case ESR_ELx_EC_IABT_LOW:
+			/*
+			 * Claim a level 0 translation fault.
+			 * All other bits are architecturally RES0 for faults
+			 * reported with that DFSC value, so we clear them.
+			 */
+			esr &= ESR_ELx_EC_MASK | ESR_ELx_IL;
+			esr |= ESR_ELx_FSC_FAULT;
+			break;
+		default:
+			/*
+			 * This should never happen (entry.S only brings us
+			 * into this code for insn and data aborts from a lower
+			 * exception level). Fail safe by not providing an ESR
+			 * context record at all.
+			 */
+			WARN(1, "ESR 0x%x is not DABT or IABT from EL0\n", esr);
+			esr = 0;
+			break;
+		}
+	}
+
 	current->thread.fault_code = esr;
 	arm64_force_sig_info(info, esr_to_fault_info(esr)->name, current);
 }
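
The value being sanitised here is the one userspace can observe in the esr_context record that the kernel appends to the signal frame. A hedged userspace sketch of digging it out follows; it assumes an arm64 Linux system, and the two struct definitions mirror the UAPI layout from <asm/sigcontext.h> rather than including that header directly.

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <ucontext.h>
#include <unistd.h>

/* Mirrors the arm64 UAPI records from <asm/sigcontext.h>. */
struct aarch64_ctx {
	unsigned int magic;
	unsigned int size;
};
#define ESR_MAGIC 0x45535201
struct esr_ctx {
	struct aarch64_ctx head;
	unsigned long long esr;
};

static void segv_handler(int sig, siginfo_t *info, void *ucv)
{
	ucontext_t *uc = ucv;
	struct aarch64_ctx *ctx =
		(struct aarch64_ctx *)uc->uc_mcontext.__reserved;

	/* The records form a size-prefixed list, terminated by magic 0.
	 * (fprintf is not async-signal-safe; fine for a demo.) */
	while (ctx->magic != 0 && ctx->size != 0) {
		if (ctx->magic == ESR_MAGIC) {
			struct esr_ctx *e = (struct esr_ctx *)ctx;
			/* For a user fault on a kernel VA this now reads as
			 * a level 0 translation fault: EC/IL/CM/WnR survive,
			 * everything else is scrubbed. */
			fprintf(stderr, "SIGSEGV at %p, ESR=%#llx\n",
				info->si_addr, e->esr);
			break;
		}
		ctx = (struct aarch64_ctx *)((char *)ctx + ctx->size);
	}
	_exit(1);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = segv_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGSEGV, &sa, NULL);

	/* Fault on a kernel address to exercise the sanitised path. */
	*(volatile int *)0xffff000000000000UL = 1;
	return 0;
}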

arch/arm64/mm/mmu.c

@@ -933,13 +933,15 @@ int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
 {
 	pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT |
 					pgprot_val(mk_sect_prot(prot)));
+	pud_t new_pud = pfn_pud(__phys_to_pfn(phys), sect_prot);
 
-	/* ioremap_page_range doesn't honour BBM */
-	if (pud_present(READ_ONCE(*pudp)))
+	/* Only allow permission changes for now */
+	if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)),
+				   pud_val(new_pud)))
 		return 0;
 
 	BUG_ON(phys & ~PUD_MASK);
-	set_pud(pudp, pfn_pud(__phys_to_pfn(phys), sect_prot));
+	set_pud(pudp, new_pud);
 	return 1;
 }
 
@@ -947,13 +949,15 @@ int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
 {
 	pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT |
 					pgprot_val(mk_sect_prot(prot)));
+	pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), sect_prot);
 
-	/* ioremap_page_range doesn't honour BBM */
-	if (pmd_present(READ_ONCE(*pmdp)))
+	/* Only allow permission changes for now */
+	if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)),
+				   pmd_val(new_pmd)))
 		return 0;
 
 	BUG_ON(phys & ~PMD_MASK);
-	set_pmd(pmdp, pfn_pmd(__phys_to_pfn(phys), sect_prot));
+	set_pmd(pmdp, new_pmd);
 	return 1;
 }
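
The old code bailed out whenever an entry was already present, which meant a second pud_set_huge()/pmd_set_huge() call could never tighten permissions on an existing mapping; the new code instead asks pgattr_change_is_safe() whether the in-place update would violate the architecture's break-before-make rules. For reference, the check has roughly this shape; this is a paraphrase of the arm64 helper, not the verbatim mm/mmu.c source, with the PTE_* constants coming from <asm/pgtable-hwdef.h>:

/* Paraphrased sketch of pgattr_change_is_safe(), not the exact source. */
static bool pgattr_change_is_safe(u64 old, u64 new)
{
	/*
	 * Permission bits may be updated on live kernel mappings without
	 * break-before-make; any other attribute change must go via an
	 * invalid entry.
	 */
	static const u64 mask = PTE_PXN | PTE_RDONLY | PTE_WRITE;

	/* Creating or tearing down a mapping is always fine. */
	if (old == 0 || new == 0)
		return true;

	return ((old ^ new) & ~mask) == 0;
}

So a read-only transition (setting PTE_RDONLY, clearing PTE_WRITE) now goes through, while a change to, say, the memory type still returns 0 and forces the caller down the non-huge path.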