This patch ensures that bitfields are split even in the case

when the current field is not a legal integer type.

Differential Revision: https://reviews.llvm.org/D39053

llvm-svn: 331979
This commit is contained in:
Strahinja Petrovic 2018-05-10 12:31:12 +00:00
parent 11a9de74c9
commit 0f274c0111
3 changed files with 35 additions and 11 deletions

View File

@ -1156,7 +1156,7 @@ def fxray_instrumentation_bundle :
def ffine_grained_bitfield_accesses : Flag<["-"],
"ffine-grained-bitfield-accesses">, Group<f_clang_Group>, Flags<[CC1Option]>,
HelpText<"Use separate accesses for bitfields with legal widths and alignments.">;
HelpText<"Use separate accesses for consecutive bitfield runs with legal widths and alignments.">;
def fno_fine_grained_bitfield_accesses : Flag<["-"],
"fno-fine-grained-bitfield-accesses">, Group<f_clang_Group>, Flags<[CC1Option]>,
HelpText<"Use large-integer access for consecutive bitfield runs.">;

View File

@ -404,19 +404,20 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
return;
}
// Check if current Field is better as a single field run. When current field
// Check if OffsetInRecord is better as a single field run. When OffsetInRecord
// has legal integer width, and its bitfield offset is naturally aligned, it
// is better to make the bitfield a separate storage component so as it can be
// accessed directly with lower cost.
auto IsBetterAsSingleFieldRun = [&](RecordDecl::field_iterator Field) {
auto IsBetterAsSingleFieldRun = [&](uint64_t OffsetInRecord,
uint64_t StartBitOffset) {
if (!Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
return false;
unsigned Width = Field->getBitWidthValue(Context);
if (!DataLayout.isLegalInteger(Width))
if (!DataLayout.isLegalInteger(OffsetInRecord))
return false;
// Make sure Field is naturally aligned if it is treated as an IType integer.
if (getFieldBitOffset(*Field) %
Context.toBits(getAlignment(getIntNType(Width))) !=
// Make sure StartBitOffset is naturally aligned if it is treated as an
// IType integer.
if (StartBitOffset %
Context.toBits(getAlignment(getIntNType(OffsetInRecord))) !=
0)
return false;
return true;
@ -435,14 +436,15 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
Run = Field;
StartBitOffset = getFieldBitOffset(*Field);
Tail = StartBitOffset + Field->getBitWidthValue(Context);
StartFieldAsSingleRun = IsBetterAsSingleFieldRun(Run);
StartFieldAsSingleRun = IsBetterAsSingleFieldRun(Tail - StartBitOffset,
StartBitOffset);
}
++Field;
continue;
}
// If the start field of a new run is better as a single run, or
// if current field is better as a single run, or
// if current field (or consecutive fields) is better as a single run, or
// if current field has zero width bitfield and either
// UseZeroLengthBitfieldAlignment or UseBitFieldTypeAlignment is set to
// true, or
@ -451,7 +453,7 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
// skip the block below and go ahead to emit the storage.
// Otherwise, try to add bitfields to the run.
if (!StartFieldAsSingleRun && Field != FieldEnd &&
!IsBetterAsSingleFieldRun(Field) &&
!IsBetterAsSingleFieldRun(Tail - StartBitOffset, StartBitOffset) &&
(!Field->isZeroLengthBitField(Context) ||
(!Context.getTargetInfo().useZeroLengthBitfieldAlignment() &&
!Context.getTargetInfo().useBitFieldTypeAlignment())) &&

View File

@ -0,0 +1,22 @@
// Test that -ffine-grained-bitfield-accesses splits a run of consecutive
// bitfields into separately accessible storage units even when the run's
// total width (44 or 76 bits of bitfields here) would otherwise be stored
// in an integer type (i48 / i80) that is not legal for the target.
// RUN: %clang_cc1 -triple x86_64-linux-gnu -ffine-grained-bitfield-accesses \
// RUN: -emit-llvm -o - %s | FileCheck %s
// S4: f1 (28) + f2 (4) fill a naturally aligned 32-bit unit; f3 (12) gets
// its own 16-bit unit rather than the whole run becoming a single i48.
struct S4 {
unsigned long f1:28;
unsigned long f2:4;
unsigned long f3:12;
};
struct S4 a4;
// S5: two 32-bit units (f1+f2, f3+f4) plus a 16-bit unit for f5 and tail
// padding, instead of one i80 covering the whole run.
struct S5 {
unsigned long f1:28;
unsigned long f2:4;
unsigned long f3:28;
unsigned long f4:4;
unsigned long f5:12;
};
struct S5 a5;
// Verify the lowered struct types are the split storage units, and that
// the single wide (illegal) integer forms do not appear.
// CHECK: %struct.S4 = type { i32, i16 }
// CHECK-NOT: %struct.S4 = type { i48 }
// CHECK: %struct.S5 = type { i32, i32, i16, [6 x i8] }
// CHECK-NOT: %struct.S5 = type { i80 }