[ASan] Replace IR based callbacks with shared assembly code callbacks.

This change moves the optimized ASan check callbacks out of each .o file and into shared assembly code in compiler-rt (see the naming sketch below).

Reviewed By: vitalybuka, morehouse

Differential Revision: https://reviews.llvm.org/D115396
Kirill Stoimenov 2021-12-13 16:10:58 +00:00
parent 240be6541d
commit 89577be895
5 changed files with 31 additions and 671 deletions
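
For orientation, a minimal standalone sketch (an assumed helper, not the in-tree code) of the symbol-name scheme the new X86AsmPrinter::LowerASAN_CHECK_MEMACCESS below uses to pick the shared callback; the in-tree lowering additionally reports a fatal error when the shadow offset would be OR-ed in, because that mode has no optimized callback:

#include <string>

// Build the name of the shared ASan check callback, mirroring the string
// concatenation added to LowerASAN_CHECK_MEMACCESS in this diff.
// RegName is the MC register name of the pointer operand, e.g. "RDI".
std::string asanCheckCallbackName(bool IsWrite, unsigned AccessSizeIndex,
                                  bool OrShadowOffset,
                                  const std::string &RegName) {
  std::string Name = IsWrite ? "store" : "load";   // access kind
  std::string Op = OrShadowOffset ? "or" : "add";  // how the shadow base is applied
  // The access size is 1 << AccessSizeIndex bytes (1, 2, 4, 8 or 16).
  return "__asan_check_" + Name + "_" + Op + "_" +
         std::to_string(1ULL << AccessSizeIndex) + "_" + RegName;
}

With the name resolved this way, the AsmPrinter only emits a call to that symbol; the per-register callback bodies that used to be generated into every object file (the emitAsanMemaccess* helpers deleted below) are no longer needed.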


@@ -754,8 +754,6 @@ static void emitNonLazyStubs(MachineModuleInfo *MMI, MCStreamer &OutStreamer) {
void X86AsmPrinter::emitEndOfAsmFile(Module &M) {
const Triple &TT = TM.getTargetTriple();
emitAsanMemaccessSymbols(M);
if (TT.isOSBinFormatMachO()) {
// Mach-O uses non-lazy symbol stubs to encode per-TU information into
// global table for symbol lookup.


@@ -100,20 +100,6 @@ class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
// Address sanitizer specific lowering for X86.
void LowerASAN_CHECK_MEMACCESS(const MachineInstr &MI);
void emitAsanMemaccessSymbols(Module &M);
void emitAsanMemaccessPartial(Module &M, unsigned Reg,
const ASanAccessInfo &AccessInfo,
MCSubtargetInfo &STI);
void emitAsanMemaccessFull(Module &M, unsigned Reg,
const ASanAccessInfo &AccessInfo,
MCSubtargetInfo &STI);
void emitAsanReportError(Module &M, unsigned Reg,
const ASanAccessInfo &AccessInfo,
MCSubtargetInfo &STI);
typedef std::tuple<unsigned /*Reg*/, uint32_t /*AccessInfo*/>
AsanMemaccessTuple;
std::map<AsanMemaccessTuple, MCSymbol *> AsanMemaccessSymbols;
// Choose between emitting .seh_ directives and .cv_fpo_ directives.
void EmitSEHInstruction(const MachineInstr *MI);


@@ -48,6 +48,7 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Instrumentation/AddressSanitizer.h"
#include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h"
#include <string>
using namespace llvm;
@@ -1336,235 +1337,29 @@ void X86AsmPrinter::LowerASAN_CHECK_MEMACCESS(const MachineInstr &MI) {
return;
}
unsigned Reg = MI.getOperand(0).getReg().id();
const auto &Reg = MI.getOperand(0).getReg();
ASanAccessInfo AccessInfo(MI.getOperand(1).getImm());
MCSymbol *&Sym =
AsanMemaccessSymbols[AsanMemaccessTuple(Reg, AccessInfo.Packed)];
if (!Sym) {
std::string Name = AccessInfo.IsWrite ? "store" : "load";
std::string SymName = "__asan_check_" + Name +
utostr(1ULL << AccessInfo.AccessSizeIndex) + "_rn" +
utostr(Reg);
Sym = OutContext.getOrCreateSymbol(SymName);
}
uint64_t ShadowBase;
int MappingScale;
bool OrShadowOffset;
getAddressSanitizerParams(Triple(TM.getTargetTriple()), 64,
AccessInfo.CompileKernel, &ShadowBase,
&MappingScale, &OrShadowOffset);
std::string Name = AccessInfo.IsWrite ? "store" : "load";
std::string Op = OrShadowOffset ? "or" : "add";
std::string SymName = "__asan_check_" + Name + "_" + Op + "_" +
utostr(1ULL << AccessInfo.AccessSizeIndex) + "_" +
TM.getMCRegisterInfo()->getName(Reg.asMCReg());
if (OrShadowOffset)
report_fatal_error(
"OrShadowOffset is not supported with optimized callbacks");
EmitAndCountInstruction(
MCInstBuilder(X86::CALL64pcrel32)
.addExpr(MCSymbolRefExpr::create(Sym, OutContext)));
}
void X86AsmPrinter::emitAsanMemaccessPartial(Module &M, unsigned Reg,
const ASanAccessInfo &AccessInfo,
MCSubtargetInfo &STI) {
assert(AccessInfo.AccessSizeIndex == 0 || AccessInfo.AccessSizeIndex == 1 ||
AccessInfo.AccessSizeIndex == 2);
assert(Reg != X86::R10);
assert(Reg != X86::R11);
uint64_t ShadowBase;
int MappingScale;
bool OrShadowOffset;
getAddressSanitizerParams(
Triple(M.getTargetTriple()), M.getDataLayout().getPointerSizeInBits(),
AccessInfo.CompileKernel, &ShadowBase, &MappingScale, &OrShadowOffset);
OutStreamer->emitInstruction(MCInstBuilder(X86::MOV64rr)
.addReg(X86::R10)
.addReg(X86::NoRegister + Reg),
STI);
OutStreamer->emitInstruction(MCInstBuilder(X86::SHR64ri)
.addReg(X86::R10)
.addReg(X86::R10)
.addImm(MappingScale),
STI);
if (OrShadowOffset) {
OutStreamer->emitInstruction(MCInstBuilder(X86::OR64ri32)
.addReg(X86::R10)
.addReg(X86::R10)
.addImm(ShadowBase),
STI);
OutStreamer->emitInstruction(MCInstBuilder(X86::MOV8rm)
.addReg(X86::R10B)
.addReg(X86::R10)
.addImm(1)
.addReg(X86::NoRegister)
.addImm(0)
.addReg(X86::NoRegister),
STI);
OutStreamer->emitInstruction(
MCInstBuilder(X86::TEST8rr).addReg(X86::R10B).addReg(X86::R10B), STI);
} else {
OutStreamer->emitInstruction(MCInstBuilder(X86::MOVSX32rm8)
.addReg(X86::R10D)
.addReg(X86::R10)
.addImm(1)
.addReg(X86::NoRegister)
.addImm(ShadowBase)
.addReg(X86::NoRegister),
STI);
OutStreamer->emitInstruction(
MCInstBuilder(X86::TEST32rr).addReg(X86::R10D).addReg(X86::R10D), STI);
}
MCSymbol *AdditionalCheck = OutContext.createTempSymbol();
OutStreamer->emitInstruction(
MCInstBuilder(X86::JCC_1)
.addExpr(MCSymbolRefExpr::create(AdditionalCheck, OutContext))
.addImm(X86::COND_NE),
STI);
MCSymbol *ReturnSym = OutContext.createTempSymbol();
OutStreamer->emitLabel(ReturnSym);
OutStreamer->emitInstruction(MCInstBuilder(getRetOpcode(*Subtarget)), STI);
// Shadow byte is non-zero so we need to perform additional checks.
OutStreamer->emitLabel(AdditionalCheck);
OutStreamer->emitInstruction(MCInstBuilder(X86::MOV64rr)
.addReg(X86::R11)
.addReg(X86::NoRegister + Reg),
STI);
const size_t Granularity = 1ULL << MappingScale;
OutStreamer->emitInstruction(MCInstBuilder(X86::AND32ri8)
.addReg(X86::NoRegister)
.addReg(X86::R11D)
.addImm(Granularity - 1),
STI);
if (AccessInfo.AccessSizeIndex == 1) {
OutStreamer->emitInstruction(MCInstBuilder(X86::ADD32ri8)
.addReg(X86::NoRegister)
.addReg(X86::R11D)
.addImm(1),
STI);
} else if (AccessInfo.AccessSizeIndex == 2) {
OutStreamer->emitInstruction(MCInstBuilder(X86::ADD32ri8)
.addReg(X86::NoRegister)
.addReg(X86::R11D)
.addImm(3),
STI);
}
OutStreamer->emitInstruction(
MCInstBuilder(X86::CMP32rr).addReg(X86::R11D).addReg(X86::R10D).addImm(1),
STI);
OutStreamer->emitInstruction(
MCInstBuilder(X86::JCC_1)
.addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext))
.addImm(X86::COND_L),
STI);
emitAsanReportError(M, Reg, AccessInfo, STI);
}
void X86AsmPrinter::emitAsanMemaccessFull(Module &M, unsigned Reg,
const ASanAccessInfo &AccessInfo,
MCSubtargetInfo &STI) {
assert(AccessInfo.AccessSizeIndex == 3 || AccessInfo.AccessSizeIndex == 4);
assert(Reg != X86::R10);
assert(Reg != X86::R11);
uint64_t ShadowBase;
int MappingScale;
bool OrShadowOffset;
getAddressSanitizerParams(
Triple(M.getTargetTriple()), M.getDataLayout().getPointerSizeInBits(),
AccessInfo.CompileKernel, &ShadowBase, &MappingScale, &OrShadowOffset);
OutStreamer->emitInstruction(MCInstBuilder(X86::MOV64rr)
.addReg(X86::R10)
.addReg(X86::NoRegister + Reg),
STI);
OutStreamer->emitInstruction(MCInstBuilder(X86::SHR64ri)
.addReg(X86::R10)
.addReg(X86::R10)
.addImm(MappingScale),
STI);
if (OrShadowOffset) {
OutStreamer->emitInstruction(MCInstBuilder(X86::OR64ri32)
.addReg(X86::R10)
.addReg(X86::R10)
.addImm(ShadowBase),
STI);
auto OpCode = AccessInfo.AccessSizeIndex == 3 ? X86::CMP8mi : X86::CMP16mi8;
OutStreamer->emitInstruction(MCInstBuilder(OpCode)
.addReg(X86::R10)
.addImm(1)
.addReg(X86::NoRegister)
.addImm(0)
.addReg(X86::NoRegister)
.addImm(0),
STI);
} else {
auto OpCode = AccessInfo.AccessSizeIndex == 3 ? X86::CMP8mi : X86::CMP16mi8;
OutStreamer->emitInstruction(MCInstBuilder(OpCode)
.addReg(X86::R10)
.addImm(1)
.addReg(X86::NoRegister)
.addImm(ShadowBase)
.addReg(X86::NoRegister)
.addImm(0),
STI);
}
MCSymbol *ReportCode = OutContext.createTempSymbol();
OutStreamer->emitInstruction(
MCInstBuilder(X86::JCC_1)
.addExpr(MCSymbolRefExpr::create(ReportCode, OutContext))
.addImm(X86::COND_NE),
STI);
MCSymbol *ReturnSym = OutContext.createTempSymbol();
OutStreamer->emitLabel(ReturnSym);
OutStreamer->emitInstruction(MCInstBuilder(getRetOpcode(*Subtarget)), STI);
OutStreamer->emitLabel(ReportCode);
emitAsanReportError(M, Reg, AccessInfo, STI);
}
void X86AsmPrinter::emitAsanReportError(Module &M, unsigned Reg,
const ASanAccessInfo &AccessInfo,
MCSubtargetInfo &STI) {
std::string Name = AccessInfo.IsWrite ? "store" : "load";
MCSymbol *ReportError = OutContext.getOrCreateSymbol(
"__asan_report_" + Name + utostr(1ULL << AccessInfo.AccessSizeIndex));
OutStreamer->emitInstruction(MCInstBuilder(X86::MOV64rr)
.addReg(X86::RDI)
.addReg(X86::NoRegister + Reg),
STI);
OutStreamer->emitInstruction(
MCInstBuilder(X86::JMP_4)
.addExpr(MCSymbolRefExpr::create(ReportError, MCSymbolRefExpr::VK_PLT,
OutContext)),
STI);
}
void X86AsmPrinter::emitAsanMemaccessSymbols(Module &M) {
if (AsanMemaccessSymbols.empty())
return;
const Triple &TT = TM.getTargetTriple();
assert(TT.isOSBinFormatELF());
std::unique_ptr<MCSubtargetInfo> STI(
TM.getTarget().createMCSubtargetInfo(TT.str(), "", ""));
assert(STI && "Unable to create subtarget info");
for (auto &P : AsanMemaccessSymbols) {
MCSymbol *Sym = P.second;
OutStreamer->SwitchSection(OutContext.getELFSection(
".text.hot", ELF::SHT_PROGBITS,
ELF::SHF_EXECINSTR | ELF::SHF_ALLOC | ELF::SHF_GROUP, 0, Sym->getName(),
/*IsComdat=*/true));
OutStreamer->emitSymbolAttribute(Sym, MCSA_ELF_TypeFunction);
OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
OutStreamer->emitSymbolAttribute(Sym, MCSA_Hidden);
OutStreamer->emitLabel(Sym);
unsigned Reg = std::get<0>(P.first);
ASanAccessInfo AccessInfo(std::get<1>(P.first));
if (AccessInfo.AccessSizeIndex < 3) {
emitAsanMemaccessPartial(M, Reg, AccessInfo, *STI);
} else {
emitAsanMemaccessFull(M, Reg, AccessInfo, *STI);
}
}
.addExpr(MCSymbolRefExpr::create(
OutContext.getOrCreateSymbol(SymName), OutContext)));
}
void X86AsmPrinter::LowerPATCHABLE_OP(const MachineInstr &MI,


@@ -5,8 +5,8 @@ target triple = "x86_64-unknown-linux-gnu"
define void @load1(i8* nocapture readonly %x) {
; CHECK: pushq %rax
; CHECK-NOT: push %rbp
; CHECK: callq __asan_check_load1_rn[[RN1:.*]]
; CHECK: callq __asan_check_store1_rn[[RN1]]
; CHECK: callq __asan_check_load_add_1_[[REG1:.*]]
; CHECK: callq __asan_check_store_add_1_[[REG1]]
; CHECK-NOT: pop %rbp
; CHECK: popq %rax
call void @llvm.asan.check.memaccess(i8* %x, i32 0)
@@ -17,8 +17,8 @@ define void @load1(i8* nocapture readonly %x) {
define void @load2(i16* nocapture readonly %x) {
; CHECK: pushq %rax
; CHECK-NOT: push %rbp
; CHECK: callq __asan_check_load2_rn[[RN2:.*]]
; CHECK: callq __asan_check_store2_rn[[RN2]]
; CHECK: callq __asan_check_load_add_2_[[REG2:.*]]
; CHECK: callq __asan_check_store_add_2_[[REG2]]
; CHECK-NOT: pop %rbp
; CHECK: popq %rax
%1 = ptrtoint i16* %x to i64
@@ -31,8 +31,8 @@ define void @load2(i16* nocapture readonly %x) {
define void @load4(i32* nocapture readonly %x) {
; CHECK: pushq %rax
; CHECK-NOT: push %rbp
; CHECK: callq __asan_check_load4_rn[[RN4:.*]]
; CHECK: callq __asan_check_store4_rn[[RN4]]
; CHECK: callq __asan_check_load_add_4_[[REG4:.*]]
; CHECK: callq __asan_check_store_add_4_[[REG4]]
; CHECK-NOT: pop %rbp
; CHECK: popq %rax
%1 = ptrtoint i32* %x to i64
@@ -44,8 +44,8 @@ define void @load4(i32* nocapture readonly %x) {
define void @load8(i64* nocapture readonly %x) {
; CHECK: pushq %rax
; CHECK-NOT: push %rbp
; CHECK: callq __asan_check_load8_rn[[RN8:.*]]
; CHECK: callq __asan_check_store8_rn[[RN8]]
; CHECK: callq __asan_check_load_add_8_[[REG8:.*]]
; CHECK: callq __asan_check_store_add_8_[[REG8]]
; CHECK-NOT: pop %rbp
; CHECK: popq %rax
%1 = ptrtoint i64* %x to i64
@@ -58,8 +58,8 @@ define void @load8(i64* nocapture readonly %x) {
define void @load16(i128* nocapture readonly %x) {
; CHECK: pushq %rax
; CHECK-NOT: push %rbp
; CHECK: callq __asan_check_load16_rn[[RN16:.*]]
; CHECK: callq __asan_check_store16_rn[[RN16]]
; CHECK: callq __asan_check_load_add_16_[[REG16:.*]]
; CHECK: callq __asan_check_store_add_16_[[REG16]]
; CHECK-NOT: pop %rbp
; CHECK: popq %rax
%1 = ptrtoint i128* %x to i64
@@ -69,178 +69,4 @@ define void @load16(i128* nocapture readonly %x) {
ret void
}
; CHECK: .type __asan_check_load1_rn[[RN1]],@function
; CHECK-NEXT: .weak __asan_check_load1_rn[[RN1]]
; CHECK-NEXT: .hidden __asan_check_load1_rn[[RN1]]
; CHECK-NEXT: __asan_check_load1_rn[[RN1]]:
; CHECK-NEXT: movq [[REG:.*]], %r10
; CHECK-NEXT: shrq $3, %r10
; CHECK-NEXT: movsbl 2147450880(%r10), %r10d
; CHECK-NEXT: testl %r10d, %r10d
; CHECK-NEXT: jne [[EXTRA:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[EXTRA]]:
; CHECK-NEXT: movq [[REG]], %r11
; CHECK-NEXT: andl $7, %r11d
; CHECK-NEXT: cmpl %r10d, %r11d
; CHECK-NEXT: jl [[RET]]
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_load1
; CHECK: .type __asan_check_load2_rn[[RN2]],@function
; CHECK-NEXT: .weak __asan_check_load2_rn[[RN2]]
; CHECK-NEXT: .hidden __asan_check_load2_rn[[RN2]]
; CHECK-NEXT: __asan_check_load2_rn[[RN2]]:
; CHECK-NEXT: movq [[REG:.*]], %r10
; CHECK-NEXT: shrq $3, %r10
; CHECK-NEXT: movsbl 2147450880(%r10), %r10d
; CHECK-NEXT: testl %r10d, %r10d
; CHECK-NEXT: jne [[EXTRA:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[EXTRA]]:
; CHECK-NEXT: movq [[REG]], %r11
; CHECK-NEXT: andl $7, %r11d
; CHECK-NEXT: addl $1, %r11d
; CHECK-NEXT: cmpl %r10d, %r11d
; CHECK-NEXT: jl [[RET]]
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_load2
; CHECK: .type __asan_check_load4_rn[[RN4]],@function
; CHECK-NEXT: .weak __asan_check_load4_rn[[RN4]]
; CHECK-NEXT: .hidden __asan_check_load4_rn[[RN4]]
; CHECK-NEXT: __asan_check_load4_rn[[RN4]]:
; CHECK-NEXT: movq [[REG:.*]], %r10
; CHECK-NEXT: shrq $3, %r10
; CHECK-NEXT: movsbl 2147450880(%r10), %r10d
; CHECK-NEXT: testl %r10d, %r10d
; CHECK-NEXT: jne [[EXTRA:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[EXTRA]]:
; CHECK-NEXT: movq [[REG]], %r11
; CHECK-NEXT: andl $7, %r11d
; CHECK-NEXT: addl $3, %r11d
; CHECK-NEXT: cmpl %r10d, %r11d
; CHECK-NEXT: jl [[RET]]
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_load4
; CHECK: .type __asan_check_load8_rn[[RN8]],@function
; CHECK-NEXT: .weak __asan_check_load8_rn[[RN8]]
; CHECK-NEXT: .hidden __asan_check_load8_rn[[RN8]]
; CHECK-NEXT: __asan_check_load8_rn[[RN8]]:
; CHECK-NEXT: movq [[REG:.*]], %r10
; CHECK-NEXT: shrq $3, %r10
; CHECK-NEXT: cmpb $0, 2147450880(%r10)
; CHECK-NEXT: jne [[FAIL:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[FAIL]]:
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_load8
; CHECK: .type __asan_check_load16_rn[[RN16]],@function
; CHECK-NEXT: .weak __asan_check_load16_rn[[RN16]]
; CHECK-NEXT: .hidden __asan_check_load16_rn[[RN16]]
; CHECK-NEXT: __asan_check_load16_rn[[RN16]]:
; CHECK-NEXT: movq [[REG:.*]], %r10
; CHECK-NEXT: shrq $3, %r10
; CHECK-NEXT: cmpw $0, 2147450880(%r10)
; CHECK-NEXT: jne [[FAIL:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[FAIL]]:
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_load16
; CHECK: .type __asan_check_store1_rn[[RN1]],@function
; CHECK-NEXT: .weak __asan_check_store1_rn[[RN1]]
; CHECK-NEXT: .hidden __asan_check_store1_rn[[RN1]]
; CHECK-NEXT: __asan_check_store1_rn[[RN1]]:
; CHECK-NEXT: movq [[REG:.*]], %r10
; CHECK-NEXT: shrq $3, %r10
; CHECK-NEXT: movsbl 2147450880(%r10), %r10d
; CHECK-NEXT: testl %r10d, %r10d
; CHECK-NEXT: jne [[EXTRA:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[EXTRA]]:
; CHECK-NEXT: movq [[REG]], %r11
; CHECK-NEXT: andl $7, %r11d
; CHECK-NEXT: cmpl %r10d, %r11d
; CHECK-NEXT: jl [[RET]]
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_store1@PLT
; CHECK: .type __asan_check_store2_rn[[RN2]],@function
; CHECK-NEXT: .weak __asan_check_store2_rn[[RN2]]
; CHECK-NEXT: .hidden __asan_check_store2_rn[[RN2]]
; CHECK-NEXT: __asan_check_store2_rn[[RN2]]:
; CHECK-NEXT: movq [[REG:.*]], %r10
; CHECK-NEXT: shrq $3, %r10
; CHECK-NEXT: movsbl 2147450880(%r10), %r10d
; CHECK-NEXT: testl %r10d, %r10d
; CHECK-NEXT: jne [[EXTRA:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[EXTRA]]:
; CHECK-NEXT: movq [[REG]], %r11
; CHECK-NEXT: andl $7, %r11d
; CHECK-NEXT: addl $1, %r11d
; CHECK-NEXT: cmpl %r10d, %r11d
; CHECK-NEXT: jl [[RET]]
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_store2@PLT
; CHECK: .type __asan_check_store4_rn[[RN4]],@function
; CHECK-NEXT: .weak __asan_check_store4_rn[[RN4]]
; CHECK-NEXT: .hidden __asan_check_store4_rn[[RN4]]
; CHECK-NEXT: __asan_check_store4_rn[[RN4]]:
; CHECK-NEXT: movq [[REG:.*]], %r10
; CHECK-NEXT: shrq $3, %r10
; CHECK-NEXT: movsbl 2147450880(%r10), %r10d
; CHECK-NEXT: testl %r10d, %r10d
; CHECK-NEXT: jne [[EXTRA:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[EXTRA]]:
; CHECK-NEXT: movq [[REG]], %r11
; CHECK-NEXT: andl $7, %r11d
; CHECK-NEXT: addl $3, %r11d
; CHECK-NEXT: cmpl %r10d, %r11d
; CHECK-NEXT: jl [[RET]]
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_store4@PLT
; CHECK: .type __asan_check_store8_rn[[RN8]],@function
; CHECK-NEXT: .weak __asan_check_store8_rn[[RN8]]
; CHECK-NEXT: .hidden __asan_check_store8_rn[[RN8]]
; CHECK-NEXT: __asan_check_store8_rn[[RN8]]:
; CHECK-NEXT: movq [[REG:.*]], %r10
; CHECK-NEXT: shrq $3, %r10
; CHECK-NEXT: cmpb $0, 2147450880(%r10)
; CHECK-NEXT: jne [[FAIL:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[FAIL]]:
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_store8@PLT
; CHECK: .type __asan_check_store16_rn[[RN16]],@function
; CHECK-NEXT: .weak __asan_check_store16_rn[[RN16]]
; CHECK-NEXT: .hidden __asan_check_store16_rn[[RN16]]
; CHECK-NEXT: __asan_check_store16_rn[[RN16]]:
; CHECK-NEXT: movq [[REG:.*]], %r10
; CHECK-NEXT: shrq $3, %r10
; CHECK-NEXT: cmpw $0, 2147450880(%r10)
; CHECK-NEXT: jne [[FAIL:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[FAIL]]:
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_store16@PLT
declare void @llvm.asan.check.memaccess(i8*, i32 immarg)


@@ -1,256 +1,11 @@
; RUN: llc < %s | FileCheck %s
; XFAIL: *
; RUN: llc < %s
target triple = "x86_64-pc-win"
define void @load1(i8* nocapture readonly %x) {
; CHECK: pushq %rax
; CHECK-NOT: push %rbp
; CHECK: callq __asan_check_load1_rn[[RN1:.*]]
; CHECK: callq __asan_check_store1_rn[[RN1]]
; CHECK-NOT: pop %rbp
; CHECK: popq %rax
call void @llvm.asan.check.memaccess(i8* %x, i32 0)
call void @llvm.asan.check.memaccess(i8* %x, i32 32)
ret void
}
define void @load2(i16* nocapture readonly %x) {
; CHECK: pushq %rax
; CHECK-NOT: push %rbp
; CHECK: callq __asan_check_load2_rn[[RN2:.*]]
; CHECK: callq __asan_check_store2_rn[[RN2]]
; CHECK-NOT: pop %rbp
; CHECK: popq %rax
%1 = ptrtoint i16* %x to i64
%2 = bitcast i16* %x to i8*
call void @llvm.asan.check.memaccess(i8* %2, i32 2)
call void @llvm.asan.check.memaccess(i8* %2, i32 34)
ret void
}
define void @load4(i32* nocapture readonly %x) {
; CHECK: pushq %rax
; CHECK-NOT: push %rbp
; CHECK: callq __asan_check_load4_rn[[RN4:.*]]
; CHECK: callq __asan_check_store4_rn[[RN4]]
; CHECK-NOT: pop %rbp
; CHECK: popq %rax
%1 = ptrtoint i32* %x to i64
%2 = bitcast i32* %x to i8*
call void @llvm.asan.check.memaccess(i8* %2, i32 4)
call void @llvm.asan.check.memaccess(i8* %2, i32 36)
ret void
}
define void @load8(i64* nocapture readonly %x) {
; CHECK: pushq %rax
; CHECK-NOT: push %rbp
; CHECK: callq __asan_check_load8_rn[[RN8:.*]]
; CHECK: callq __asan_check_store8_rn[[RN8]]
; CHECK-NOT: pop %rbp
; CHECK: popq %rax
%1 = ptrtoint i64* %x to i64
%2 = bitcast i64* %x to i8*
call void @llvm.asan.check.memaccess(i8* %2, i32 6)
call void @llvm.asan.check.memaccess(i8* %2, i32 38)
ret void
}
define void @load16(i128* nocapture readonly %x) {
; CHECK: pushq %rax
; CHECK-NOT: push %rbp
; CHECK: callq __asan_check_load16_rn[[RN16:.*]]
; CHECK: callq __asan_check_store16_rn[[RN16]]
; CHECK-NOT: pop %rbp
; CHECK: popq %rax
%1 = ptrtoint i128* %x to i64
%2 = bitcast i128* %x to i8*
call void @llvm.asan.check.memaccess(i8* %2, i32 8)
call void @llvm.asan.check.memaccess(i8* %2, i32 40)
ret void
}
; CHECK: .type __asan_check_load1_rn[[RN1]],@function
; CHECK-NEXT: .weak __asan_check_load1_rn[[RN1]]
; CHECK-NEXT: .hidden __asan_check_load1_rn[[RN1]]
; CHECK-NEXT: __asan_check_load1_rn[[RN1]]:
; CHECK-NEXT: movq [[REG:.*]], %r10
; CHECK-NEXT: shrq $3, %r10
; CHECK-NEXT: orq $17592186044416, %r10{{.*}}
; CHECK-NEXT: movb (%r10), %r10b
; CHECK-NEXT: testb %r10b, %r10b
; CHECK-NEXT: jne [[EXTRA:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[EXTRA]]:
; CHECK-NEXT: movq [[REG]], %r11
; CHECK-NEXT: andl $7, %r11d
; CHECK-NEXT: cmpl %r10d, %r11d
; CHECK-NEXT: jl [[RET]]
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_load1
; CHECK: .type __asan_check_load2_rn[[RN2]],@function
; CHECK-NEXT: .weak __asan_check_load2_rn[[RN2]]
; CHECK-NEXT: .hidden __asan_check_load2_rn[[RN2]]
; CHECK-NEXT: __asan_check_load2_rn[[RN2]]:
; CHECK-NEXT: movq [[REG:.*]], %r10
; CHECK-NEXT: shrq $3, %r10
; CHECK-NEXT: orq $17592186044416, %r10{{.*}}
; CHECK-NEXT: movb (%r10), %r10b
; CHECK-NEXT: testb %r10b, %r10b
; CHECK-NEXT: jne [[EXTRA:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[EXTRA]]:
; CHECK-NEXT: movq [[REG]], %r11
; CHECK-NEXT: andl $7, %r11d
; CHECK-NEXT: addl $1, %r11d
; CHECK-NEXT: cmpl %r10d, %r11d
; CHECK-NEXT: jl [[RET]]
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_load2
; CHECK: .type __asan_check_load4_rn[[RN4]],@function
; CHECK-NEXT: .weak __asan_check_load4_rn[[RN4]]
; CHECK-NEXT: .hidden __asan_check_load4_rn[[RN4]]
; CHECK-NEXT: __asan_check_load4_rn[[RN4]]:
; CHECK-NEXT: movq [[REG:.*]], %r10
; CHECK-NEXT: shrq $3, %r10
; CHECK-NEXT: orq $17592186044416, %r10{{.*}}
; CHECK-NEXT: movb (%r10), %r10b
; CHECK-NEXT: testb %r10b, %r10b
; CHECK-NEXT: jne [[EXTRA:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[EXTRA]]:
; CHECK-NEXT: movq [[REG]], %r11
; CHECK-NEXT: andl $7, %r11d
; CHECK-NEXT: addl $3, %r11d
; CHECK-NEXT: cmpl %r10d, %r11d
; CHECK-NEXT: jl [[RET]]
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_load4
; CHECK: .type __asan_check_load8_rn[[RN8]],@function
; CHECK-NEXT: .weak __asan_check_load8_rn[[RN8]]
; CHECK-NEXT: .hidden __asan_check_load8_rn[[RN8]]
; CHECK-NEXT: __asan_check_load8_rn[[RN8]]:
; CHECK-NEXT: movq [[REG:.*]], %r10
; CHECK-NEXT: shrq $3, %r10
; CHECK-NEXT: orq $17592186044416, %r10{{.*}}
; CHECK-NEXT: cmpb $0, (%r10)
; CHECK-NEXT: jne [[FAIL:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[FAIL]]:
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_load8
; CHECK: .type __asan_check_load16_rn[[RN16]],@function
; CHECK-NEXT: .weak __asan_check_load16_rn[[RN16]]
; CHECK-NEXT: .hidden __asan_check_load16_rn[[RN16]]
; CHECK-NEXT: __asan_check_load16_rn[[RN16]]:
; CHECK-NEXT: movq [[REG:.*]], %r10
; CHECK-NEXT: shrq $3, %r10
; CHECK-NEXT: orq $17592186044416, %r10{{.*}}
; CHECK-NEXT: cmpw $0, (%r10)
; CHECK-NEXT: jne [[FAIL:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[FAIL]]:
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_load16
; CHECK: .type __asan_check_store1_rn[[RN1]],@function
; CHECK-NEXT: .weak __asan_check_store1_rn[[RN1]]
; CHECK-NEXT: .hidden __asan_check_store1_rn[[RN1]]
; CHECK-NEXT: __asan_check_store1_rn[[RN1]]:
; CHECK-NEXT: movq [[REG:.*]], %r10
; CHECK-NEXT: shrq $3, %r10
; CHECK-NEXT: orq $17592186044416, %r10
; CHECK-NEXT: movb (%r10), %r10b
; CHECK-NEXT: testb %r10b, %r10b
; CHECK-NEXT: jne [[EXTRA:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[EXTRA]]:
; CHECK-NEXT: movq [[REG]], %r11
; CHECK-NEXT: andl $7, %r11d
; CHECK-NEXT: cmpl %r10d, %r11d
; CHECK-NEXT: jl [[RET]]
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_store1@PLT
; CHECK: .type __asan_check_store2_rn[[RN2]],@function
; CHECK-NEXT: .weak __asan_check_store2_rn[[RN2]]
; CHECK-NEXT: .hidden __asan_check_store2_rn[[RN2]]
; CHECK-NEXT: __asan_check_store2_rn[[RN2]]:
; CHECK-NEXT: movq [[REG:.*]], %r10
; CHECK-NEXT: shrq $3, %r10
; CHECK-NEXT: orq $17592186044416, %r10
; CHECK-NEXT: movb (%r10), %r10b
; CHECK-NEXT: testb %r10b, %r10b
; CHECK-NEXT: jne [[EXTRA:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[EXTRA]]:
; CHECK-NEXT: movq [[REG]], %r11
; CHECK-NEXT: andl $7, %r11d
; CHECK-NEXT: addl $1, %r11d
; CHECK-NEXT: cmpl %r10d, %r11d
; CHECK-NEXT: jl [[RET]]
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_store2@PLT
; CHECK: .type __asan_check_store4_rn[[RN4]],@function
; CHECK-NEXT: .weak __asan_check_store4_rn[[RN4]]
; CHECK-NEXT: .hidden __asan_check_store4_rn[[RN4]]
; CHECK-NEXT: __asan_check_store4_rn[[RN4]]:
; CHECK-NEXT: movq [[REG:.*]], %r10
; CHECK-NEXT: shrq $3, %r10
; CHECK-NEXT: orq $17592186044416, %r10
; CHECK-NEXT: movb (%r10), %r10b
; CHECK-NEXT: testb %r10b, %r10b
; CHECK-NEXT: jne [[EXTRA:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[EXTRA]]:
; CHECK-NEXT: movq [[REG]], %r11
; CHECK-NEXT: andl $7, %r11d
; CHECK-NEXT: addl $3, %r11d
; CHECK-NEXT: cmpl %r10d, %r11d
; CHECK-NEXT: jl [[RET]]
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_store4@PLT
; CHECK: .type __asan_check_store8_rn[[RN8]],@function
; CHECK-NEXT: .weak __asan_check_store8_rn[[RN8]]
; CHECK-NEXT: .hidden __asan_check_store8_rn[[RN8]]
; CHECK-NEXT: __asan_check_store8_rn[[RN8]]:
; CHECK-NEXT: movq [[REG:.*]], %r10
; CHECK-NEXT: shrq $3, %r10
; CHECK-NEXT: orq $17592186044416, %r10{{.*}}
; CHECK-NEXT: cmpb $0, (%r10)
; CHECK-NEXT: jne [[FAIL:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[FAIL]]:
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_store8@PLT
; CHECK: .type __asan_check_store16_rn[[RN16]],@function
; CHECK-NEXT: .weak __asan_check_store16_rn[[RN16]]
; CHECK-NEXT: .hidden __asan_check_store16_rn[[RN16]]
; CHECK-NEXT: __asan_check_store16_rn[[RN16]]:
; CHECK-NEXT: movq [[REG:.*]], %r10
; CHECK-NEXT: shrq $3, %r10
; CHECK-NEXT: orq $17592186044416, %r10{{.*}}
; CHECK-NEXT: cmpw $0, (%r10)
; CHECK-NEXT: jne [[FAIL:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[FAIL]]:
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_store16@PLT
declare void @llvm.asan.check.memaccess(i8*, i32 immarg)