Revert "[ARM] Follow AACPS standard for volatile bit-fields access width"

This reverts commit 514df1b2bb.

Some of the buildbots got llvm-lit errors on CodeGen/volatile.c
Ties Stuij 2020-09-08 18:43:59 +01:00
parent cd5c5c4848
commit d6f3f61231
8 changed files with 105 additions and 3521 deletions

clang/include/clang/Basic/CodeGenOptions.def

@@ -392,13 +392,9 @@ CODEGENOPT(Addrsig, 1, 0)
 /// Whether to emit unused static constants.
 CODEGENOPT(KeepStaticConsts, 1, 0)
-/// Whether to follow the AAPCS enforcing at least one read before storing to a volatile bitfield
+/// Whether to not follow the AAPCS that enforce at least one read before storing to a volatile bitfield
 CODEGENOPT(ForceAAPCSBitfieldLoad, 1, 0)
-/// Whether to not follow the AAPCS that enforces volatile bit-field access width to be
-/// according to the field declaring type width.
-CODEGENOPT(AAPCSBitfieldWidth, 1, 1)
 #undef CODEGENOPT
 #undef ENUM_CODEGENOPT
 #undef VALUE_CODEGENOPT
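
The reverted CODEGENOPT gated the AAPCS width rule for volatile bit-fields. A minimal C sketch of what the option changed (struct and function names are invented for illustration):

struct S {
  volatile unsigned f : 8; /* declared with a 32-bit container type */
};

/* Under AAPCS, sizeof(struct S) == 4. With AAPCSBitfieldWidth (default 1 in
 * the reverted patch), accesses to s->f were emitted as 32-bit loads/stores
 * of the whole container; after this revert, clang again uses the narrow
 * 8-bit storage unit it selected during record layout. */
unsigned get(volatile struct S *s) { return s->f; }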

clang/include/clang/Driver/Options.td

@@ -2363,15 +2363,9 @@ def mno_neg_immediates: Flag<["-"], "mno-neg-immediates">, Group<m_arm_Features_Group>,
 def mcmse : Flag<["-"], "mcmse">, Group<m_arm_Features_Group>,
   Flags<[DriverOption,CC1Option]>,
   HelpText<"Allow use of CMSE (Armv8-M Security Extensions)">;
-def ForceAAPCSBitfieldLoad : Flag<["-"], "faapcs-bitfield-load">, Group<m_arm_Features_Group>,
+def ForceAAPCSBitfieldLoad : Flag<["-"], "fAAPCSBitfieldLoad">, Group<m_arm_Features_Group>,
   Flags<[DriverOption,CC1Option]>,
   HelpText<"Follows the AAPCS standard that all volatile bit-field write generates at least one load. (ARM only).">;
-def ForceNoAAPCSBitfieldWidth : Flag<["-"], "fno-aapcs-bitfield-width">, Group<m_arm_Features_Group>,
-  Flags<[DriverOption,CC1Option]>,
-  HelpText<"Do not follow the AAPCS standard requirement that volatile bit-field width is dictated by the field container type. (ARM only).">;
-def AAPCSBitfieldWidth : Flag<["-"], "faapcs-bitfield-width">, Group<m_arm_Features_Group>,
-  Flags<[DriverOption,CC1Option]>,
-  HelpText<"Follow the AAPCS standard requirement stating that volatile bit-field width is dictated by the field container type. (ARM only).">;
 def mgeneral_regs_only : Flag<["-"], "mgeneral-regs-only">, Group<m_aarch64_Features_Group>,
   HelpText<"Generate code which only uses the general purpose registers (AArch64 only)">;

clang/lib/CodeGen/CGExpr.cpp

@@ -1927,27 +1927,22 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
   llvm::Type *ResLTy = ConvertType(LV.getType());
   Address Ptr = LV.getBitFieldAddress();
-  llvm::Value *Val =
-      Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
-  bool UseVolatile = LV.isVolatileQualified() &&
-                     Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
-  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
-  const unsigned StorageSize =
-      UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
+  llvm::Value *Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
+
   if (Info.IsSigned) {
-    assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
-    unsigned HighBits = StorageSize - Offset - Info.Size;
+    assert(static_cast<unsigned>(Info.Offset + Info.Size) <= Info.StorageSize);
+    unsigned HighBits = Info.StorageSize - Info.Offset - Info.Size;
     if (HighBits)
       Val = Builder.CreateShl(Val, HighBits, "bf.shl");
-    if (Offset + HighBits)
-      Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr");
+    if (Info.Offset + HighBits)
+      Val = Builder.CreateAShr(Val, Info.Offset + HighBits, "bf.ashr");
   } else {
-    if (Offset)
-      Val = Builder.CreateLShr(Val, Offset, "bf.lshr");
-    if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
-      Val = Builder.CreateAnd(
-          Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear");
+    if (Info.Offset)
+      Val = Builder.CreateLShr(Val, Info.Offset, "bf.lshr");
+    if (static_cast<unsigned>(Info.Offset) + Info.Size < Info.StorageSize)
+      Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(Info.StorageSize,
+                                                              Info.Size),
+                              "bf.clear");
   }
   Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
   EmitScalarRangeCheck(Val, LV.getType(), Loc);
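
The signed path above extracts a field with a shift-left then arithmetic-shift-right pair. A worked C equivalent, assuming a 16-bit storage unit with the field at Offset 3, Size 5 (numbers invented for illustration; it relies on arithmetic right shift of signed values, which clang provides):

short load_signed(unsigned short container) {
  /* HighBits = StorageSize - Offset - Size = 16 - 3 - 5 = 8 */
  short v = (short)(container << 8); /* bf.shl: field's top bit moves to bit 15 */
  return (short)(v >> 11);           /* bf.ashr by Offset + HighBits = 11:
                                        sign-extends and right-aligns the field */
}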
@@ -2149,43 +2144,39 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                              /*isSigned=*/false);
   llvm::Value *MaskedVal = SrcVal;
 
-  const bool UseVolatile = CGM.getCodeGenOpts().AAPCSBitfieldWidth &&
-                           Dst.isVolatileQualified() &&
-                           Info.VolatileStorageSize != 0 &&
-                           isAAPCS(CGM.getTarget());
-  const unsigned StorageSize =
-      UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
-  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
   // See if there are other bits in the bitfield's storage we'll need to load
   // and mask together with source before storing.
-  if (StorageSize != Info.Size) {
-    assert(StorageSize > Info.Size && "Invalid bitfield size.");
+  if (Info.StorageSize != Info.Size) {
+    assert(Info.StorageSize > Info.Size && "Invalid bitfield size.");
     llvm::Value *Val =
         Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
 
     // Mask the source value as needed.
     if (!hasBooleanRepresentation(Dst.getType()))
-      SrcVal = Builder.CreateAnd(
-          SrcVal, llvm::APInt::getLowBitsSet(StorageSize, Info.Size),
-          "bf.value");
+      SrcVal = Builder.CreateAnd(SrcVal,
+                                 llvm::APInt::getLowBitsSet(Info.StorageSize,
+                                                            Info.Size),
+                                 "bf.value");
     MaskedVal = SrcVal;
-    if (Offset)
-      SrcVal = Builder.CreateShl(SrcVal, Offset, "bf.shl");
+    if (Info.Offset)
+      SrcVal = Builder.CreateShl(SrcVal, Info.Offset, "bf.shl");
 
     // Mask out the original value.
-    Val = Builder.CreateAnd(
-        Val, ~llvm::APInt::getBitsSet(StorageSize, Offset, Offset + Info.Size),
-        "bf.clear");
+    Val = Builder.CreateAnd(Val,
+                            ~llvm::APInt::getBitsSet(Info.StorageSize,
+                                                     Info.Offset,
+                                                     Info.Offset + Info.Size),
+                            "bf.clear");
 
     // Or together the unchanged values and the source value.
     SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
   } else {
-    assert(Offset == 0);
+    assert(Info.Offset == 0);
     // According to the AACPS:
     // When a volatile bit-field is written, and its container does not overlap
-    // with any non-bit-field member, its container must be read exactly once
-    // and written exactly once using the access width appropriate to the type
-    // of the container. The two accesses are not atomic.
+    // with any non-bit-field member, its container must be read exactly once and
+    // written exactly once using the access width appropriate to the type of the
+    // container. The two accesses are not atomic.
     if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
         CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
       Builder.CreateLoad(Ptr, true, "bf.load");
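
The masking sequence above is a standard read-modify-write. A C sketch of the same steps for an 8-bit storage unit with the field at Offset 3, Size 5 (values invented for illustration):

unsigned char store_field(unsigned char container, unsigned char src) {
  unsigned char val = container;  /* bf.load: other bits we must preserve  */
  src &= 0x1f;                    /* bf.value: keep the low Size = 5 bits  */
  src <<= 3;                      /* bf.shl: move the field to Offset = 3  */
  val &= (unsigned char)~0xf8;    /* bf.clear: zero the field's bit range  */
  return val | src;               /* bf.set: merge; caller stores it back  */
}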
@@ -2200,8 +2191,8 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
     // Sign extend the value if needed.
     if (Info.IsSigned) {
-      assert(Info.Size <= StorageSize);
-      unsigned HighBits = StorageSize - Info.Size;
+      assert(Info.Size <= Info.StorageSize);
+      unsigned HighBits = Info.StorageSize - Info.Size;
       if (HighBits) {
         ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
         ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
@@ -4213,45 +4204,32 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
   if (field->isBitField()) {
     const CGRecordLayout &RL =
         CGM.getTypes().getCGRecordLayout(field->getParent());
     const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
-    const bool UseVolatile = isAAPCS(CGM.getTarget()) &&
-                             CGM.getCodeGenOpts().AAPCSBitfieldWidth &&
-                             Info.VolatileStorageSize != 0 &&
-                             field->getType()
-                                 .withCVRQualifiers(base.getVRQualifiers())
-                                 .isVolatileQualified();
     Address Addr = base.getAddress(*this);
     unsigned Idx = RL.getLLVMFieldNo(field);
     const RecordDecl *rec = field->getParent();
-    if (!UseVolatile) {
-      if (!IsInPreservedAIRegion &&
-          (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
-        if (Idx != 0)
-          // For structs, we GEP to the field that the record layout suggests.
-          Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
-      } else {
-        llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
-            getContext().getRecordType(rec), rec->getLocation());
-        Addr = Builder.CreatePreserveStructAccessIndex(
-            Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()),
-            DbgInfo);
-      }
+    if (!IsInPreservedAIRegion &&
+        (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
+      if (Idx != 0)
+        // For structs, we GEP to the field that the record layout suggests.
+        Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
+    } else {
+      llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
+          getContext().getRecordType(rec), rec->getLocation());
+      Addr = Builder.CreatePreserveStructAccessIndex(Addr, Idx,
+          getDebugInfoFIndex(rec, field->getFieldIndex()),
+          DbgInfo);
     }
-    const unsigned SS =
-        UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
+
     // Get the access type.
-    llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS);
+    llvm::Type *FieldIntTy =
+        llvm::Type::getIntNTy(getLLVMContext(), Info.StorageSize);
     if (Addr.getElementType() != FieldIntTy)
       Addr = Builder.CreateElementBitCast(Addr, FieldIntTy);
-    if (UseVolatile) {
-      const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity();
-      if (VolatileOffset)
-        Addr = Builder.CreateConstInBoundsGEP(Addr, VolatileOffset);
-    }
+
     QualType fieldType =
         field->getType().withCVRQualifiers(base.getVRQualifiers());
     // TODO: Support TBAA for bit fields.
     LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
     return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo,
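
With the volatile path removed, the emitted address is always the storage unit chosen at record-layout time: a struct GEP to the LLVM field index, then a bitcast to an integer type of Info.StorageSize bits. For instance (an invented struct; the exact LLVM type is target-dependent):

struct T {
  char a;
  unsigned b : 3; /* Offset:0 Size:3 */
  unsigned c : 5; /* Offset:3 Size:5 */
};
/* On a typical little-endian target this lowers to something like
 *   %struct.T = type { i8, i8, [2 x i8] }
 * and accesses to b or c GEP to field index 1 and load/store it as an i8,
 * since both share one storage unit with StorageSize = 8. */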

clang/lib/CodeGen/CGRecordLayout.h

@@ -46,7 +46,7 @@ namespace CodeGen {
 ///   };
 ///
 /// This will end up as the following LLVM type. The first array is the
-/// bitfield, and the second is the padding out to a 4-byte alignment.
+/// bitfield, and the second is the padding out to a 4-byte alignmnet.
 ///
 ///   %t = type { i8, i8, i8, i8, i8, [3 x i8] }
 ///
@@ -80,21 +80,8 @@ struct CGBitFieldInfo {
   /// The offset of the bitfield storage from the start of the struct.
   CharUnits StorageOffset;
 
-  /// The offset within a contiguous run of bitfields that are represented as a
-  /// single "field" within the LLVM struct type, taking into account the AAPCS
-  /// rules for volatile bitfields. This offset is in bits.
-  unsigned VolatileOffset : 16;
-
-  /// The storage size in bits which should be used when accessing this
-  /// bitfield.
-  unsigned VolatileStorageSize;
-
-  /// The offset of the bitfield storage from the start of the struct.
-  CharUnits VolatileStorageOffset;
-
   CGBitFieldInfo()
-      : Offset(), Size(), IsSigned(), StorageSize(), StorageOffset(),
-        VolatileOffset(), VolatileStorageSize(), VolatileStorageOffset() {}
+      : Offset(), Size(), IsSigned(), StorageSize(), StorageOffset() {}
 
   CGBitFieldInfo(unsigned Offset, unsigned Size, bool IsSigned,
                  unsigned StorageSize, CharUnits StorageOffset)
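
A concrete reading of the fields that remain after the revert, using an invented little-endian example in the same format the dump method prints:

struct Example {
  unsigned a : 3;
  unsigned b : 5;
};
/* Both fields share one 8-bit storage unit at the start of the struct:
 *   a: <CGBitFieldInfo Offset:0 Size:3 IsSigned:0 StorageSize:8 StorageOffset:0>
 *   b: <CGBitFieldInfo Offset:3 Size:5 IsSigned:0 StorageSize:8 StorageOffset:0>
 */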

clang/lib/CodeGen/CGRecordLayoutBuilder.cpp

@@ -109,14 +109,6 @@ struct CGRecordLowering {
            D->isMsStruct(Context);
   }
 
-  /// Helper function to check if we are targeting AAPCS.
-  bool isAAPCS() const {
-    return Context.getTargetInfo().getABI().startswith("aapcs");
-  }
-
-  /// Helper function to check if the target machine is BigEndian.
-  bool isBE() const { return Context.getTargetInfo().isBigEndian(); }
-
   /// The Itanium base layout rule allows virtual bases to overlap
   /// other bases, which complicates layout in specific ways.
   ///
@@ -180,8 +172,7 @@ struct CGRecordLowering {
   void lowerUnion();
   void accumulateFields();
   void accumulateBitFields(RecordDecl::field_iterator Field,
                            RecordDecl::field_iterator FieldEnd);
-  void computeVolatileBitfields();
   void accumulateBases();
   void accumulateVPtrs();
   void accumulateVBases();
@@ -246,10 +237,6 @@ void CGRecordLowering::setBitFieldInfo(
   // least-significant-bit.
   if (DataLayout.isBigEndian())
     Info.Offset = Info.StorageSize - (Info.Offset + Info.Size);
-
-  Info.VolatileStorageSize = 0;
-  Info.VolatileOffset = 0;
-  Info.VolatileStorageOffset = CharUnits::Zero();
 }
 
 void CGRecordLowering::lower(bool NVBaseType) {
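
setBitFieldInfo stores Offset relative to the least-significant bit, so on big-endian targets it mirrors the declared offset within the container. Worked numbers, assuming an illustrative 8-bit storage unit:

/* Two fields packed into one 8-bit unit, declared order a:3 then b:5:
 *   little-endian: a at Offset 0, b at Offset 3
 *   big-endian, Offset' = StorageSize - (Offset + Size):
 *     a: 8 - (0 + 3) = 5,  b: 8 - (3 + 5) = 0
 * so the shift-based extraction in EmitLoadOfBitfieldLValue works unchanged. */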
@@ -274,21 +261,15 @@ void CGRecordLowering::lower(bool NVBaseType) {
   //   8) Format the complete list of members in a way that can be consumed by
   //      CodeGenTypes::ComputeRecordLayout.
   CharUnits Size = NVBaseType ? Layout.getNonVirtualSize() : Layout.getSize();
-  if (D->isUnion()) {
-    lowerUnion();
-    computeVolatileBitfields();
-    return;
-  }
+  if (D->isUnion())
+    return lowerUnion();
   accumulateFields();
   // RD implies C++.
   if (RD) {
     accumulateVPtrs();
     accumulateBases();
-    if (Members.empty()) {
-      appendPaddingBytes(Size);
-      computeVolatileBitfields();
-      return;
-    }
+    if (Members.empty())
+      return appendPaddingBytes(Size);
     if (!NVBaseType)
       accumulateVBases();
   }
@@ -300,7 +281,6 @@ void CGRecordLowering::lower(bool NVBaseType) {
     Members.pop_back();
   calculateZeroInit();
   fillOutputFields();
-  computeVolatileBitfields();
 }
 
 void CGRecordLowering::lowerUnion() {
@@ -438,9 +418,9 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
     if (OffsetInRecord < 8 || !llvm::isPowerOf2_64(OffsetInRecord) ||
         !DataLayout.fitsInLegalInteger(OffsetInRecord))
       return false;
-    // Make sure StartBitOffset is naturally aligned if it is treated as an
+    // Make sure StartBitOffset is natually aligned if it is treated as an
     // IType integer.
     if (StartBitOffset %
             Context.toBits(getAlignment(getIntNType(OffsetInRecord))) !=
         0)
       return false;
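
The alignment test in this hunk rejects runs whose IType access would be misaligned. Worked numbers, assuming illustrative offsets:

/* For a proposed run of OffsetInRecord = 16 bits:
 *   getIntNType(16) is i16, alignment 2 bytes = 16 bits, so the check is
 *   StartBitOffset % 16 != 0.
 * A run starting at bit 8 gives 8 % 16 == 8: the i16 access would straddle
 * an alignment boundary and the run is rejected. A run starting at bit 16
 * gives 16 % 16 == 0 and is accepted. */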
@@ -523,123 +503,6 @@ void CGRecordLowering::accumulateBases() {
   }
 }
 
-/// The AAPCS that defines that, when possible, bit-fields should
-/// be accessed using containers of the declared type width:
-/// When a volatile bit-field is read, and its container does not overlap with
-/// any non-bit-field member or any zero length bit-field member, its container
-/// must be read exactly once using the access width appropriate to the type of
-/// the container. When a volatile bit-field is written, and its container does
-/// not overlap with any non-bit-field member or any zero-length bit-field
-/// member, its container must be read exactly once and written exactly once
-/// using the access width appropriate to the type of the container. The two
-/// accesses are not atomic.
-///
-/// Enforcing the width restriction can be disabled using
-/// -fno-aapcs-bitfield-width.
-void CGRecordLowering::computeVolatileBitfields() {
-  if (!isAAPCS() || !Types.getCodeGenOpts().AAPCSBitfieldWidth)
-    return;
-
-  for (auto &I : BitFields) {
-    const FieldDecl *Field = I.first;
-    CGBitFieldInfo &Info = I.second;
-    llvm::Type *ResLTy = Types.ConvertTypeForMem(Field->getType());
-    // If the record alignment is less than the type width, we can't enforce a
-    // aligned load, bail out.
-    if ((uint64_t)(Context.toBits(Layout.getAlignment())) <
-        ResLTy->getPrimitiveSizeInBits())
-      continue;
-    // CGRecordLowering::setBitFieldInfo() pre-adjusts the bit-field offsets
-    // for big-endian targets, but it assumes a container of width
-    // Info.StorageSize. Since AAPCS uses a different container size (width
-    // of the type), we first undo that calculation here and redo it once
-    // the bit-field offset within the new container is calculated.
-    const unsigned OldOffset =
-        isBE() ? Info.StorageSize - (Info.Offset + Info.Size) : Info.Offset;
-    // Offset to the bit-field from the beginning of the struct.
-    const unsigned AbsoluteOffset =
-        Context.toBits(Info.StorageOffset) + OldOffset;
-
-    // Container size is the width of the bit-field type.
-    const unsigned StorageSize = ResLTy->getPrimitiveSizeInBits();
-    // Nothing to do if the access uses the desired
-    // container width and is naturally aligned.
-    if (Info.StorageSize == StorageSize && (OldOffset % StorageSize == 0))
-      continue;
-
-    // Offset within the container.
-    unsigned Offset = AbsoluteOffset & (StorageSize - 1);
-    // Bail out if an aligned load of the container cannot cover the entire
-    // bit-field. This can happen for example, if the bit-field is part of a
-    // packed struct. AAPCS does not define access rules for such cases, we let
-    // clang to follow its own rules.
-    if (Offset + Info.Size > StorageSize)
-      continue;
-
-    // Re-adjust offsets for big-endian targets.
-    if (isBE())
-      Offset = StorageSize - (Offset + Info.Size);
-
-    const CharUnits StorageOffset =
-        Context.toCharUnitsFromBits(AbsoluteOffset & ~(StorageSize - 1));
-    const CharUnits End = StorageOffset +
-                          Context.toCharUnitsFromBits(StorageSize) -
-                          CharUnits::One();
-
-    const ASTRecordLayout &Layout =
-        Context.getASTRecordLayout(Field->getParent());
-    // If we access outside memory outside the record, than bail out.
-    const CharUnits RecordSize = Layout.getSize();
-    if (End >= RecordSize)
-      continue;
-
-    // Bail out if performing this load would access non-bit-fields members.
-    bool Conflict = false;
-    for (const auto *F : D->fields()) {
-      // Allow sized bit-fields overlaps.
-      if (F->isBitField() && !F->isZeroLengthBitField(Context))
-        continue;
-
-      const CharUnits FOffset = Context.toCharUnitsFromBits(
-          Layout.getFieldOffset(F->getFieldIndex()));
-
-      // As C11 defines, a zero sized bit-field defines a barrier, so
-      // fields after and before it should be race condition free.
-      // The AAPCS acknowledges it and imposes no restritions when the
-      // natural container overlaps a zero-length bit-field.
-      if (F->isZeroLengthBitField(Context)) {
-        if (End > FOffset && StorageOffset < FOffset) {
-          Conflict = true;
-          break;
-        }
-      }
-
-      const CharUnits FEnd =
-          FOffset +
-          Context.toCharUnitsFromBits(
-              Types.ConvertTypeForMem(F->getType())->getPrimitiveSizeInBits()) -
-          CharUnits::One();
-      // If no overlap, continue.
-      if (End < FOffset || FEnd < StorageOffset)
-        continue;
-
-      // The desired load overlaps a non-bit-field member, bail out.
-      Conflict = true;
-      break;
-    }
-
-    if (Conflict)
-      continue;
-    // Write the new bit-field access parameters.
-    // As the storage offset now is defined as the number of elements from the
-    // start of the structure, we should divide the Offset by the element size.
-    Info.VolatileStorageOffset =
-        StorageOffset / Context.toCharUnitsFromBits(StorageSize).getQuantity();
-    Info.VolatileStorageSize = StorageSize;
-    Info.VolatileOffset = Offset;
-  }
-}
-
 void CGRecordLowering::accumulateVPtrs() {
   if (Layout.hasOwnVFPtr())
     Members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::VFPtr,
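
To make the removed pass concrete, a worked example of what it computed (struct invented; little-endian AAPCS target assumed):

struct V {
  volatile unsigned f : 8; /* packed with g into one i16 unit: StorageSize 16 */
  unsigned g : 8;
};
/* For f: the declared type is 32 bits wide and AbsoluteOffset = 0, so the
 * AAPCS container is the naturally aligned 32-bit word at byte 0. That i32
 * access covers only bit-field members and stays inside the 4-byte record,
 * so the pass recorded:
 *   VolatileStorageSize = 32, VolatileOffset = 0, VolatileStorageOffset = 0
 * A packed struct whose widened container would overlap a non-bit-field
 * member was skipped, keeping clang's default narrow access. */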
@@ -985,10 +848,8 @@ CodeGenTypes::ComputeRecordLayout(const RecordDecl *D, llvm::StructType *Ty) {
       assert(Info.StorageSize <= SL->getSizeInBits() &&
              "Union not large enough for bitfield storage");
     } else {
-      assert((Info.StorageSize ==
-                  getDataLayout().getTypeAllocSizeInBits(ElementTy) ||
-              Info.VolatileStorageSize ==
-                  getDataLayout().getTypeAllocSizeInBits(ElementTy)) &&
+      assert(Info.StorageSize ==
+             getDataLayout().getTypeAllocSizeInBits(ElementTy) &&
              "Storage size does not match the element type size");
     }
     assert(Info.Size > 0 && "Empty bitfield!");
@@ -1036,12 +897,11 @@ LLVM_DUMP_METHOD void CGRecordLayout::dump() const {
 
 void CGBitFieldInfo::print(raw_ostream &OS) const {
   OS << "<CGBitFieldInfo"
-     << " Offset:" << Offset << " Size:" << Size << " IsSigned:" << IsSigned
+     << " Offset:" << Offset
+     << " Size:" << Size
+     << " IsSigned:" << IsSigned
      << " StorageSize:" << StorageSize
-     << " StorageOffset:" << StorageOffset.getQuantity()
-     << " VolatileOffset:" << VolatileOffset
-     << " VolatileStorageSize:" << VolatileStorageSize
-     << " VolatileStorageOffset:" << VolatileStorageOffset.getQuantity() << ">";
+     << " StorageOffset:" << StorageOffset.getQuantity() << ">";
 }
 
 LLVM_DUMP_METHOD void CGBitFieldInfo::dump() const {

clang/lib/Frontend/CompilerInvocation.cpp

@@ -1453,9 +1453,6 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
       std::string(Args.getLastArgValue(OPT_fsymbol_partition_EQ));
 
   Opts.ForceAAPCSBitfieldLoad = Args.hasArg(OPT_ForceAAPCSBitfieldLoad);
-  Opts.AAPCSBitfieldWidth = Args.hasFlag(OPT_AAPCSBitfieldWidth,
-                                         OPT_ForceNoAAPCSBitfieldWidth,
-                                         true);
 
   return Success;
 }

clang/test/CodeGen/aapcs-bitfield.c: file diff suppressed because it is too large

clang/test/CodeGen/bitfield-2.c

@@ -14,7 +14,7 @@
 // CHECK-RECORD: LLVMType:%struct.s0 = type { [3 x i8] }
 // CHECK-RECORD: IsZeroInitializable:1
 // CHECK-RECORD: BitFields:[
-// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:24 IsSigned:1 StorageSize:24 StorageOffset:0
+// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:24 IsSigned:1 StorageSize:24 StorageOffset:0>
 
 struct __attribute((packed)) s0 {
   int f0 : 24;
 };
@@ -54,8 +54,8 @@ unsigned long long test_0() {
 // CHECK-RECORD: LLVMType:%struct.s1 = type { [3 x i8] }
 // CHECK-RECORD: IsZeroInitializable:1
 // CHECK-RECORD: BitFields:[
-// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:10 IsSigned:1 StorageSize:24 StorageOffset:0
-// CHECK-RECORD: <CGBitFieldInfo Offset:10 Size:10 IsSigned:1 StorageSize:24 StorageOffset:0
+// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:10 IsSigned:1 StorageSize:24 StorageOffset:0>
+// CHECK-RECORD: <CGBitFieldInfo Offset:10 Size:10 IsSigned:1 StorageSize:24 StorageOffset:0>
 
 #pragma pack(push)
 #pragma pack(1)
@@ -102,7 +102,7 @@ unsigned long long test_1() {
 // CHECK-RECORD: LLVMType:%union.u2 = type { i8 }
 // CHECK-RECORD: IsZeroInitializable:1
 // CHECK-RECORD: BitFields:[
-// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:3 IsSigned:0 StorageSize:8 StorageOffset:0
+// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:3 IsSigned:0 StorageSize:8 StorageOffset:0>
 
 union __attribute__((packed)) u2 {
   unsigned long long f0 : 3;
@@ -274,8 +274,8 @@ _Bool test_6() {
 // CHECK-RECORD: LLVMType:%struct.s7 = type { i32, i32, i32, i8, i32, [12 x i8] }
 // CHECK-RECORD: IsZeroInitializable:1
 // CHECK-RECORD: BitFields:[
-// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:5 IsSigned:1 StorageSize:8 StorageOffset:12
-// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:29 IsSigned:1 StorageSize:32 StorageOffset:16
+// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:5 IsSigned:1 StorageSize:8 StorageOffset:12>
+// CHECK-RECORD: <CGBitFieldInfo Offset:0 Size:29 IsSigned:1 StorageSize:32 StorageOffset:16>
 
 struct __attribute__((aligned(16))) s7 {
   int a, b, c;