[scudo] Make MTE inline asm compatible with GNU assembler

The GNU assembler can't parse `.arch_extension ...` before a `;`.
So instead uniformly use raw string syntax with separate lines
instead of `;` separators in the assembly code.

Reviewed By: pcc

Differential Revision: https://reviews.llvm.org/D100413
This commit is contained in:
Roland McGrath 2021-04-13 15:03:54 -07:00
parent 0f0149b8d3
commit 5c500c9f01
1 changed file with 60 additions and 24 deletions

View File

@@ -96,11 +96,19 @@ inline bool systemDetectsMemoryTagFaultsTestOnly() { return false; }
#endif // SCUDO_LINUX
// Disables MTE tag-check faults by setting PSTATE.TCO (Tag Check Override)
// to 1. Raw-string asm with one item per line is required here: the GNU
// assembler cannot parse `.arch_extension ...` followed by a `;` separator.
inline void disableMemoryTagChecksTestOnly() {
  __asm__ __volatile__(
      R"(
      .arch_extension memtag
      msr tco, #1
      )");
}
// Re-enables MTE tag-check faults by clearing PSTATE.TCO. Uses raw-string
// asm (newline separators) because the GNU assembler rejects
// `.arch_extension ...` before a `;`.
inline void enableMemoryTagChecksTestOnly() {
  __asm__ __volatile__(
      R"(
      .arch_extension memtag
      msr tco, #0
      )");
}
class ScopedDisableMemoryTagChecks {
@@ -108,21 +116,33 @@ class ScopedDisableMemoryTagChecks {
public:
// Saves the current PSTATE.TCO value into PrevTCO, then sets TCO to 1 so
// that tag-check faults are suppressed for the lifetime of this object.
// Newline-separated raw-string asm is used for GNU assembler compatibility
// (it cannot parse `.arch_extension ...` before a `;`).
ScopedDisableMemoryTagChecks() {
  __asm__ __volatile__(
      R"(
      .arch_extension memtag
      mrs %0, tco
      msr tco, #1
      )"
      : "=r"(PrevTCO));
}
// Restores the PSTATE.TCO value captured by the constructor, re-enabling
// tag checks if they were enabled before this scope.
~ScopedDisableMemoryTagChecks() {
  __asm__ __volatile__(
      R"(
      .arch_extension memtag
      msr tco, %0
      )"
      :
      : "r"(PrevTCO));
}
};
// Returns Ptr retagged with a random MTE tag chosen by the `irg`
// (Insert Random Tag) instruction, excluding any tags set in ExcludeMask.
// The address bits of Ptr are unchanged; only the tag bits differ.
inline uptr selectRandomTag(uptr Ptr, uptr ExcludeMask) {
  uptr TaggedPtr;
  __asm__ __volatile__(
      R"(
      .arch_extension memtag
      irg %[TaggedPtr], %[Ptr], %[ExcludeMask]
      )"
      : [TaggedPtr] "=r"(TaggedPtr)
      : [Ptr] "r"(Ptr), [ExcludeMask] "r"(ExcludeMask));
  return TaggedPtr;
@@ -156,10 +176,14 @@ inline void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
// chunk holding a low alignment allocation is reused for a higher alignment
// allocation, the chunk may already have a non-zero tag from the previous
// allocation.
__asm__ __volatile__(".arch_extension memtag; stg %0, [%0, #-16]"
:
: "r"(Ptr)
: "memory");
__asm__ __volatile__(
R"(
.arch_extension memtag
stg %0, [%0, #-16]
)"
:
: "r"(Ptr)
: "memory");
uptr TaggedBegin, TaggedEnd;
setRandomTag(Ptr, Size, ExcludeMask, &TaggedBegin, &TaggedEnd);
@@ -173,10 +197,14 @@ inline void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
// purpose of catching linear overflows in this case.
uptr UntaggedEnd = untagPointer(TaggedEnd);
if (UntaggedEnd != BlockEnd)
__asm__ __volatile__(".arch_extension memtag; stg %0, [%0]"
:
: "r"(UntaggedEnd)
: "memory");
__asm__ __volatile__(
R"(
.arch_extension memtag
stg %0, [%0]
)"
:
: "r"(UntaggedEnd)
: "memory");
return reinterpret_cast<void *>(TaggedBegin);
}
@@ -187,10 +215,14 @@ inline void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr BlockEnd) {
// of the allocation to 0. See explanation in prepareTaggedChunk above.
uptr RoundNewPtr = untagPointer(roundUpTo(NewPtr, 16));
if (RoundNewPtr != BlockEnd)
__asm__ __volatile__(".arch_extension memtag; stg %0, [%0]"
:
: "r"(RoundNewPtr)
: "memory");
__asm__ __volatile__(
R"(
.arch_extension memtag
stg %0, [%0]
)"
:
: "r"(RoundNewPtr)
: "memory");
return;
}
@@ -220,10 +252,14 @@ inline void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr BlockEnd) {
// Returns Ptr with its tag bits replaced by the allocation tag stored for
// Ptr's memory granule, read via the `ldg` (Load Allocation Tag)
// instruction. The address bits are unchanged. The "memory" clobber keeps
// the compiler from reordering this across tag stores.
inline uptr loadTag(uptr Ptr) {
  uptr TaggedPtr = Ptr;
  __asm__ __volatile__(
      R"(
      .arch_extension memtag
      ldg %0, [%0]
      )"
      : "+r"(TaggedPtr)
      :
      : "memory");
  return TaggedPtr;
}