[MSan] Shrink the register save area for non-SSE builds
If code is compiled for X86 without SSE support, the register save area doesn't contain FPU registers, so `AMD64FpEndOffset` should be equal to `AMD64GpEndOffset`.

llvm-svn: 339414

commit 75a954330b
parent 88863a5f62
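Background for the 48- and 176-byte constants touched below (ABI context, not part of the patch): on x86-64 System V, va_start spills the argument registers into a register save area, and va_list records byte offsets into that area. A rough C sketch of the layout, assuming the standard convention of six general-purpose registers followed by eight XMM registers:

/* Sketch of the System V AMD64 va_list element (ABI draft 0.99.6, 3.5.7). */
typedef struct {
  unsigned int gp_offset;   /* next free GP slot: runs 0..48                  */
  unsigned int fp_offset;   /* next free XMM slot: runs 48..176, 0 if no SSE  */
  void *overflow_arg_area;  /* varargs passed on the stack                    */
  void *reg_save_area;      /* the spilled argument registers                 */
} sysv_va_list[1];

/* Register save area size:
 *   6 GP registers (rdi, rsi, rdx, rcx, r8, r9) *  8 bytes =  48
 *   8 vector registers (xmm0..xmm7)             * 16 bytes = 128
 * With SSE the area ends at 48 + 128 = 176 bytes; without SSE no XMM
 * registers are spilled, so it ends at 48. That is why the patch sets
 * AMD64FpEndOffsetNoSSE equal to AMD64GpEndOffset. */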
@@ -3249,8 +3249,11 @@ struct VarArgAMD64Helper : public VarArgHelper {
   // An unfortunate workaround for asymmetric lowering of va_arg stuff.
   // See a comment in visitCallSite for more details.
   static const unsigned AMD64GpEndOffset = 48;  // AMD64 ABI Draft 0.99.6 p3.5.7
-  static const unsigned AMD64FpEndOffset = 176;
+  static const unsigned AMD64FpEndOffsetSSE = 176;
+  // If SSE is disabled, fp_offset in va_list is zero.
+  static const unsigned AMD64FpEndOffsetNoSSE = AMD64GpEndOffset;
 
+  unsigned AMD64FpEndOffset;
   Function &F;
   MemorySanitizer &MS;
   MemorySanitizerVisitor &MSV;
@@ -3262,7 +3265,18 @@ struct VarArgAMD64Helper : public VarArgHelper {
   enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
 
   VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
-                    MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
+                    MemorySanitizerVisitor &MSV)
+      : F(F), MS(MS), MSV(MSV) {
+    AMD64FpEndOffset = AMD64FpEndOffsetSSE;
+    for (const auto &Attr : F.getAttributes().getFnAttributes()) {
+      if (Attr.isStringAttribute() &&
+          (Attr.getKindAsString() == "target-features")) {
+        if (Attr.getValueAsString().contains("-sse"))
+          AMD64FpEndOffset = AMD64FpEndOffsetNoSSE;
+        break;
+      }
+    }
+  }
 
   ArgKind classifyArgument(Value* arg) {
     // A very rough approximation of X86_64 argument classification rules.
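The hunk below, from the accompanying MemorySanitizer test, clones an existing va_arg test but disables SSE through the function's "target-features" attribute, so only 48 bytes are reserved for the register save area instead of 176. As a rough illustration only (this C source and the compile flag are assumptions, not taken from the commit), IR like the new @VAArgStructNoSSE is what you would get from code along these lines built with SSE turned off, e.g. clang -mno-sse:

/* Hypothetical C equivalent of @VAArgStructNoSSE (illustration only). */
struct StructByVal { int a, b, c, d; };  /* 16 bytes */
void VAArgStructFn(int n, ...);

void VAArgStructNoSSE(struct StructByVal *s) {
  /* Passing the struct by value through "..." three times: the first two
   * copies travel in general-purpose registers, the third no longer fits
   * and is passed on the stack (the byval argument in the IR). */
  VAArgStructFn(0, *s, *s, *s);
}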
@@ -915,6 +915,26 @@ entry:
 ; CHECK: call void (i32, ...) @VAArgStructFn
 ; CHECK: ret void
 
+; Same code compiled without SSE (see attributes below).
+; The register save area is only 48 bytes instead of 176.
+define void @VAArgStructNoSSE(%struct.StructByVal* nocapture %s) sanitize_memory #0 {
+entry:
+  %agg.tmp2 = alloca %struct.StructByVal, align 8
+  %0 = bitcast %struct.StructByVal* %s to i8*
+  %agg.tmp.sroa.0.0..sroa_cast = bitcast %struct.StructByVal* %s to i64*
+  %agg.tmp.sroa.0.0.copyload = load i64, i64* %agg.tmp.sroa.0.0..sroa_cast, align 4
+  %agg.tmp.sroa.2.0..sroa_idx = getelementptr inbounds %struct.StructByVal, %struct.StructByVal* %s, i64 0, i32 2
+  %agg.tmp.sroa.2.0..sroa_cast = bitcast i32* %agg.tmp.sroa.2.0..sroa_idx to i64*
+  %agg.tmp.sroa.2.0.copyload = load i64, i64* %agg.tmp.sroa.2.0..sroa_cast, align 4
+  %1 = bitcast %struct.StructByVal* %agg.tmp2 to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 %0, i64 16, i1 false)
+  call void (i32, ...) @VAArgStructFn(i32 undef, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, %struct.StructByVal* byval align 8 %agg.tmp2)
+  ret void
+}
+
+attributes #0 = { "target-features"="+fxsr,+x87,-sse" }
+
+; CHECK: bitcast { i32, i32, i32, i32 }* {{.*}}@__msan_va_arg_tls {{.*}}, i64 48
+
 declare i32 @InnerTailCall(i32 %a)
 
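Why the new CHECK line looks for 48: MSan hands vararg shadow to the callee through the __msan_va_arg_tls buffer, with shadow for register-passed arguments first and shadow for stack-passed (overflow) arguments placed after the register-save-area portion. A simplified sketch of that layout, using the constants from the patch (the helper below is made up for illustration, not MSan's actual code):

/* Simplified sketch of the vararg shadow layout in __msan_va_arg_tls:
 *   [0, 48)      shadow for GP-register varargs
 *   [48, FpEnd)  shadow for XMM-register varargs (empty without SSE)
 *   [FpEnd, ...) shadow for stack-passed (overflow) varargs
 * With SSE, FpEnd = 176; without SSE, FpEnd = 48. */
enum { kGpEnd = 48, kFpEndSSE = 176, kFpEndNoSSE = kGpEnd };

unsigned overflow_shadow_offset(int sse_enabled) {
  return sse_enabled ? kFpEndSSE : kFpEndNoSSE;  /* 176 or 48 */
}

Shrinking the no-SSE end offset to the GP end offset avoids reserving a 128-byte stretch of shadow for XMM registers that are never spilled.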