//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//
#include "CGCall.h"
|
2015-01-22 14:17:56 +08:00
|
|
|
#include "CGRecordLayout.h"
|
2016-07-19 03:02:11 +08:00
|
|
|
#include "CodeGenFunction.h"
|
2013-03-08 05:37:12 +08:00
|
|
|
#include "CodeGenModule.h"
|
2017-08-05 02:16:31 +08:00
|
|
|
#include "TargetInfo.h"
|
2013-03-08 05:37:12 +08:00
|
|
|
#include "clang/AST/ASTContext.h"
|
2013-10-31 05:53:58 +08:00
|
|
|
#include "clang/CodeGen/CGFunctionInfo.h"
|
2017-08-16 00:02:49 +08:00
|
|
|
#include "llvm/ADT/DenseMap.h"
|
2013-03-08 05:37:12 +08:00
|
|
|
#include "llvm/IR/DataLayout.h"
|
|
|
|
#include "llvm/IR/Intrinsics.h"
|
2013-03-08 05:37:17 +08:00
|
|
|
#include "llvm/IR/Operator.h"
|
2013-03-08 05:37:12 +08:00
|
|
|
|
|
|
|

using namespace clang;
using namespace CodeGen;

namespace {
  class AtomicInfo {
    CodeGenFunction &CGF;
    QualType AtomicTy;
    QualType ValueTy;
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    CharUnits AtomicAlign;
    CharUnits ValueAlign;
    CharUnits LValueAlign;
    TypeEvaluationKind EvaluationKind;
    bool UseLibcall;
    LValue LVal;
    CGBitFieldInfo BFI;
  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
        : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
          EvaluationKind(TEK_Scalar), UseLibcall(true) {
      assert(!lvalue.isGlobalReg());
      ASTContext &C = CGF.getContext();
      if (lvalue.isSimple()) {
        AtomicTy = lvalue.getType();
        if (auto *ATy = AtomicTy->getAs<AtomicType>())
          ValueTy = ATy->getValueType();
        else
          ValueTy = AtomicTy;
        EvaluationKind = CGF.getEvaluationKind(ValueTy);

        uint64_t ValueAlignInBits;
        uint64_t AtomicAlignInBits;
        TypeInfo ValueTI = C.getTypeInfo(ValueTy);
        ValueSizeInBits = ValueTI.Width;
        ValueAlignInBits = ValueTI.Align;

        TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
        AtomicSizeInBits = AtomicTI.Width;
        AtomicAlignInBits = AtomicTI.Align;

        assert(ValueSizeInBits <= AtomicSizeInBits);
        assert(ValueAlignInBits <= AtomicAlignInBits);

        AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
        ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
        if (lvalue.getAlignment().isZero())
          lvalue.setAlignment(AtomicAlign);

        LVal = lvalue;
      } else if (lvalue.isBitField()) {
        ValueTy = lvalue.getType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
        auto &OrigBFI = lvalue.getBitFieldInfo();
        auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
        AtomicSizeInBits = C.toBits(
            C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
                .alignTo(lvalue.getAlignment()));
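        // Worked example (illustrative numbers, not from the original code):
        // with a 4-byte lvalue alignment and CharWidth == 8, a bit-field at
        // Offset == 10 bits with Size == 12 bits needs 10 + 12 + 7 == 29 bits;
        // toCharUnitsFromBits truncates that to 3 char units, and alignTo then
        // rounds up to 4 bytes, so AtomicSizeInBits becomes 32.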
        auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldPointer());
        auto OffsetInChars =
            (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
            lvalue.getAlignment();
        VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
            VoidPtrAddr, OffsetInChars.getQuantity());
        auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            VoidPtrAddr,
            CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
            "atomic_bitfield_base");
        BFI = OrigBFI;
        BFI.Offset = Offset;
        BFI.StorageSize = AtomicSizeInBits;
        BFI.StorageOffset += OffsetInChars;
        LVal = LValue::MakeBitfield(Address(Addr, lvalue.getAlignment()),
                                    BFI, lvalue.getType(), lvalue.getBaseInfo(),
                                    lvalue.getTBAAInfo());
        AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
        if (AtomicTy.isNull()) {
          llvm::APInt Size(
              /*numBits=*/32,
              C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
          AtomicTy = C.getConstantArrayType(C.CharTy, Size, ArrayType::Normal,
                                            /*IndexTypeQuals=*/0);
        }
        AtomicAlign = ValueAlign = lvalue.getAlignment();
      } else if (lvalue.isVectorElt()) {
        ValueTy = lvalue.getType()->getAs<VectorType>()->getElementType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
        AtomicTy = lvalue.getType();
        AtomicSizeInBits = C.getTypeSize(AtomicTy);
        AtomicAlign = ValueAlign = lvalue.getAlignment();
        LVal = lvalue;
      } else {
        assert(lvalue.isExtVectorElt());
        ValueTy = lvalue.getType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
        AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
            lvalue.getType(), lvalue.getExtVectorAddress()
                                  .getElementType()->getVectorNumElements());
        AtomicSizeInBits = C.getTypeSize(AtomicTy);
        AtomicAlign = ValueAlign = lvalue.getAlignment();
        LVal = lvalue;
      }
      UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
          AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
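      // Note (added annotation): hasBuiltinAtomic() reports whether the target
      // can perform an inline atomic access of this size and alignment; when
      // it cannot, shouldUseLibcall() steers the emitters in this file toward
      // the __atomic_* runtime library calls instead of native LLVM atomic
      // instructions.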
    }

    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    CharUnits getValueAlignment() const { return ValueAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
    bool shouldUseLibcall() const { return UseLibcall; }
    const LValue &getAtomicLValue() const { return LVal; }
    llvm::Value *getAtomicPointer() const {
      if (LVal.isSimple())
        return LVal.getPointer();
      else if (LVal.isBitField())
        return LVal.getBitFieldPointer();
      else if (LVal.isVectorElt())
        return LVal.getVectorPointer();
      assert(LVal.isExtVectorElt());
      return LVal.getExtVectorPointer();
    }
    Address getAtomicAddress() const {
      return Address(getAtomicPointer(), getAtomicAlignment());
    }

    Address getAtomicAddressAsAtomicIntPointer() const {
      return emitCastToAtomicIntPointer(getAtomicAddress());
    }

    /// Is the atomic size larger than the underlying value type?
    ///
    /// Note that the absence of padding does not mean that atomic
    /// objects are completely interchangeable with non-atomic
    /// objects: we might have promoted the alignment of a type
    /// without making it bigger.
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }
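    // Illustrative example (assumes a typical target): an atomic object whose
    // value type occupies 3 bytes may be widened to a 4-byte atomic
    // representation, so ValueSizeInBits (24) differs from AtomicSizeInBits
    // (32) and hasPadding() returns true; a type whose alignment alone was
    // promoted keeps equal sizes and reports false.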

    bool emitMemSetZeroIfNecessary() const;

    llvm::Value *getAtomicSizeValue() const {
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
      return CGF.CGM.getSize(size);
    }

    /// Cast the given pointer to an integer pointer suitable for atomic
    /// operations if the source is not already suitable.
    Address emitCastToAtomicIntPointer(Address Addr) const;

    /// If Addr is compatible with the iN that will be used for an atomic
    /// operation, bitcast it. Otherwise, create a temporary that is suitable
    /// and copy the value across.
    Address convertToAtomicIntPointer(Address Addr) const;

    /// Turn an atomic-layout object into an r-value.
    RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
                                     SourceLocation loc, bool AsValue) const;

    /// \brief Converts an rvalue to an integer value.
    llvm::Value *convertRValueToInt(RValue RVal) const;

    RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal,
                                     AggValueSlot ResultSlot,
                                     SourceLocation Loc, bool AsValue) const;

    /// Copy an atomic r-value into atomic-layout memory.
    void emitCopyIntoMemory(RValue rvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue() const {
      assert(LVal.isSimple());
      Address addr = getAtomicAddress();
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(addr, 0, CharUnits());

      return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
                              LVal.getBaseInfo(), LVal.getTBAAInfo());
    }

    /// \brief Emits atomic load.
    /// \returns Loaded value.
    RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                          bool AsValue, llvm::AtomicOrdering AO,
                          bool IsVolatile);

    /// \brief Emits atomic compare-and-exchange sequence.
    /// \param Expected Expected value.
    /// \param Desired Desired value.
    /// \param Success Atomic ordering for success operation.
    /// \param Failure Atomic ordering for failed operation.
    /// \param IsWeak true if atomic operation is weak, false otherwise.
    /// \returns Pair of values: previous value from storage (value type) and
    /// boolean flag (i1 type) with true if success and false otherwise.
    std::pair<RValue, llvm::Value *>
    EmitAtomicCompareExchange(RValue Expected, RValue Desired,
                              llvm::AtomicOrdering Success =
                                  llvm::AtomicOrdering::SequentiallyConsistent,
                              llvm::AtomicOrdering Failure =
                                  llvm::AtomicOrdering::SequentiallyConsistent,
                              bool IsWeak = false);

    /// \brief Emits atomic update.
    /// \param AO Atomic ordering.
    /// \param UpdateOp Update operation for the current lvalue.
    void EmitAtomicUpdate(llvm::AtomicOrdering AO,
                          const llvm::function_ref<RValue(RValue)> &UpdateOp,
                          bool IsVolatile);
    /// \brief Emits atomic update.
    /// \param AO Atomic ordering.
    void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                          bool IsVolatile);

    /// Materialize an atomic r-value in atomic-layout memory.
    Address materializeRValue(RValue rvalue) const;

    /// \brief Creates temp alloca for intermediate operations on atomic value.
    Address CreateTempAlloca() const;
  private:
    bool requiresMemSetZero(llvm::Type *type) const;

    /// \brief Emits atomic load as a libcall.
    void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                               llvm::AtomicOrdering AO, bool IsVolatile);
    /// \brief Emits atomic load as LLVM instruction.
    llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
    /// \brief Emits atomic compare-and-exchange op as a libcall.
    llvm::Value *EmitAtomicCompareExchangeLibcall(
        llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
        llvm::AtomicOrdering Success =
            llvm::AtomicOrdering::SequentiallyConsistent,
        llvm::AtomicOrdering Failure =
            llvm::AtomicOrdering::SequentiallyConsistent);
    /// \brief Emits atomic compare-and-exchange op as LLVM instruction.
    std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
        llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
        llvm::AtomicOrdering Success =
            llvm::AtomicOrdering::SequentiallyConsistent,
        llvm::AtomicOrdering Failure =
            llvm::AtomicOrdering::SequentiallyConsistent,
        bool IsWeak = false);
    /// \brief Emit atomic update as libcalls.
    void
    EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                            const llvm::function_ref<RValue(RValue)> &UpdateOp,
                            bool IsVolatile);
    /// \brief Emit atomic update as LLVM instructions.
    void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
                            const llvm::function_ref<RValue(RValue)> &UpdateOp,
                            bool IsVolatile);
    /// \brief Emit atomic update as libcalls.
    void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                 bool IsVolatile);
    /// \brief Emit atomic update as LLVM instructions.
    void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRal,
                            bool IsVolatile);
  };
}

Address AtomicInfo::CreateTempAlloca() const {
  Address TempAlloca = CGF.CreateMemTemp(
      (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
                                                                 : AtomicTy,
getAtomicAlignment(),
|
2015-02-27 14:33:30 +08:00
|
|
|
"atomic-temp");
|
|
|
|
// Cast to pointer to value type for bitfields.
|
|
|
|
if (LVal.isBitField())
|
|
|
|
return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
|
2015-09-08 16:05:57 +08:00
|
|
|
TempAlloca, getAtomicAddress().getType());
|
2015-02-27 14:33:30 +08:00
|
|
|
return TempAlloca;
|
|
|
|
}
|
|
|
|
|
2013-03-08 05:37:17 +08:00
|
|
|
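// Emit a call to an atomic runtime library function with the given name,
// result type, and already-built argument list.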
static RValue emitAtomicLibcall(CodeGenFunction &CGF,
|
|
|
|
StringRef fnName,
|
|
|
|
QualType resultType,
|
|
|
|
CallArgList &args) {
|
|
|
|
const CGFunctionInfo &fnInfo =
|
2016-03-11 12:30:31 +08:00
|
|
|
CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
|
2013-03-08 05:37:17 +08:00
|
|
|
llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
|
|
|
|
llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
|
2016-10-27 07:46:34 +08:00
|
|
|
auto callee = CGCallee::forDirect(fn);
|
|
|
|
return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args);
|
2013-03-08 05:37:17 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Does a store of the given IR type modify the full expected width?
|
|
|
|
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
|
|
|
|
uint64_t expectedSize) {
|
|
|
|
return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Does the atomic type require memsetting to zero before initialization?
|
|
|
|
///
|
|
|
|
/// The IR type is provided as a way of making certain queries faster.
|
|
|
|
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
|
|
|
|
// If the atomic type has size padding, we definitely need a memset.
|
|
|
|
if (hasPadding()) return true;
|
|
|
|
|
|
|
|
// Otherwise, do some simple heuristics to try to avoid it:
|
|
|
|
switch (getEvaluationKind()) {
|
|
|
|
// For scalars and complexes, check whether the store size of the
|
|
|
|
// type uses the full size.
|
|
|
|
case TEK_Scalar:
|
|
|
|
return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
|
|
|
|
case TEK_Complex:
|
|
|
|
return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
|
|
|
|
AtomicSizeInBits / 2);
|
|
|
|
|
2013-07-11 09:32:21 +08:00
|
|
|
// Padding in structs has an undefined bit pattern. User beware.
|
2013-03-08 05:37:17 +08:00
|
|
|
case TEK_Aggregate:
|
2013-07-11 09:32:21 +08:00
|
|
|
return false;
|
2013-03-08 05:37:17 +08:00
|
|
|
}
|
|
|
|
llvm_unreachable("bad evaluation kind");
|
|
|
|
}
|
|
|
|
|
2015-01-22 14:17:56 +08:00
|
|
|
bool AtomicInfo::emitMemSetZeroIfNecessary() const {
|
|
|
|
assert(LVal.isSimple());
|
2015-09-08 16:05:57 +08:00
|
|
|
llvm::Value *addr = LVal.getPointer();
|
2013-03-08 05:37:17 +08:00
|
|
|
if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
|
2013-07-11 09:32:21 +08:00
|
|
|
return false;
|
2013-03-08 05:37:17 +08:00
|
|
|
|
2015-02-27 14:33:30 +08:00
|
|
|
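// Zero out the full padded width of the atomic object at its known alignment.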
CGF.Builder.CreateMemSet(
|
|
|
|
addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
|
|
|
|
CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
|
|
|
|
LVal.getAlignment().getQuantity());
|
2013-07-11 09:32:21 +08:00
|
|
|
return true;
|
2013-03-08 05:37:17 +08:00
|
|
|
}
|
|
|
|
|
2014-06-14 03:43:04 +08:00
|
|
|
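// Emit a single cmpxchg with the given success/failure orderings. On failure
// the old value is stored back to Val1; the boolean success flag is stored to
// Dest.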
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
|
2015-09-08 16:05:57 +08:00
|
|
|
Address Dest, Address Ptr,
|
|
|
|
Address Val1, Address Val2,
|
|
|
|
uint64_t Size,
|
2014-03-14 03:25:48 +08:00
|
|
|
llvm::AtomicOrdering SuccessOrder,
|
2017-08-05 02:16:31 +08:00
|
|
|
llvm::AtomicOrdering FailureOrder,
|
|
|
|
llvm::SyncScope::ID Scope) {
|
2014-03-14 03:25:48 +08:00
|
|
|
// Load the expected and desired values for the compare-exchange.
|
2015-09-08 16:05:57 +08:00
|
|
|
llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
|
|
|
|
llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);
|
2014-03-14 03:25:48 +08:00
|
|
|
|
2014-06-13 22:24:59 +08:00
|
|
|
llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
|
2017-08-05 02:16:31 +08:00
|
|
|
Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder,
|
|
|
|
Scope);
|
2014-06-13 22:24:59 +08:00
|
|
|
Pair->setVolatile(E->isVolatile());
|
2014-06-14 03:43:04 +08:00
|
|
|
Pair->setWeak(IsWeak);
|
2014-03-14 03:25:48 +08:00
|
|
|
|
|
|
|
// Cmp holds the result of the compare-exchange operation: true on success,
|
|
|
|
// false on failure.
|
2014-06-13 22:24:59 +08:00
|
|
|
llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
|
|
|
|
llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);
|
2014-03-14 03:25:48 +08:00
|
|
|
|
|
|
|
// This basic block is used to hold the store instruction if the operation
|
|
|
|
// failed.
|
|
|
|
llvm::BasicBlock *StoreExpectedBB =
|
|
|
|
CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);
|
|
|
|
|
|
|
|
// This basic block is the exit point of the operation; we should end up
|
|
|
|
// here regardless of whether or not the operation succeeded.
|
|
|
|
llvm::BasicBlock *ContinueBB =
|
|
|
|
CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
|
|
|
|
|
|
|
|
// Update Expected if it isn't equal to Old; otherwise branch to the
|
|
|
|
// exit point.
|
|
|
|
CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);
|
|
|
|
|
|
|
|
CGF.Builder.SetInsertPoint(StoreExpectedBB);
|
|
|
|
// Update the memory at Expected with Old's value.
|
2015-09-08 16:05:57 +08:00
|
|
|
CGF.Builder.CreateStore(Old, Val1);
|
2014-03-14 03:25:48 +08:00
|
|
|
// Finally, branch to the exit point.
|
|
|
|
CGF.Builder.CreateBr(ContinueBB);
|
|
|
|
|
|
|
|
CGF.Builder.SetInsertPoint(ContinueBB);
|
|
|
|
// Update the memory at Dest with Cmp's value.
|
|
|
|
CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Given an ordering required on success, emit all possible cmpxchg
|
|
|
|
/// instructions to cope with the provided (but possibly only dynamically known)
|
|
|
|
/// FailureOrder.
|
|
|
|
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
|
2016-04-19 02:01:49 +08:00
|
|
|
bool IsWeak, Address Dest, Address Ptr,
|
|
|
|
Address Val1, Address Val2,
|
2014-03-14 03:25:48 +08:00
|
|
|
llvm::Value *FailureOrderVal,
|
2015-09-08 16:05:57 +08:00
|
|
|
uint64_t Size,
|
2017-08-05 02:16:31 +08:00
|
|
|
llvm::AtomicOrdering SuccessOrder,
|
|
|
|
llvm::SyncScope::ID Scope) {
|
2014-03-14 03:25:48 +08:00
|
|
|
llvm::AtomicOrdering FailureOrder;
|
|
|
|
if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
|
2016-04-19 02:01:49 +08:00
|
|
|
auto FOS = FO->getSExtValue();
|
|
|
|
if (!llvm::isValidAtomicOrderingCABI(FOS))
|
2016-04-07 01:26:42 +08:00
|
|
|
FailureOrder = llvm::AtomicOrdering::Monotonic;
|
2016-04-19 02:01:49 +08:00
|
|
|
else
|
|
|
|
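// Map the C ABI ordering to a legal cmpxchg failure ordering: release and
// acq_rel are not valid failure orderings, so they degrade to monotonic.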
switch ((llvm::AtomicOrderingCABI)FOS) {
|
|
|
|
case llvm::AtomicOrderingCABI::relaxed:
|
|
|
|
case llvm::AtomicOrderingCABI::release:
|
|
|
|
case llvm::AtomicOrderingCABI::acq_rel:
|
|
|
|
FailureOrder = llvm::AtomicOrdering::Monotonic;
|
|
|
|
break;
|
|
|
|
case llvm::AtomicOrderingCABI::consume:
|
|
|
|
case llvm::AtomicOrderingCABI::acquire:
|
|
|
|
FailureOrder = llvm::AtomicOrdering::Acquire;
|
|
|
|
break;
|
|
|
|
case llvm::AtomicOrderingCABI::seq_cst:
|
|
|
|
FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
|
|
|
|
break;
|
|
|
|
}
|
2016-04-07 07:37:36 +08:00
|
|
|
if (isStrongerThan(FailureOrder, SuccessOrder)) {
|
|
|
|
// Don't assert on undefined behavior "failure argument shall be no
|
|
|
|
// stronger than the success argument".
|
2014-03-14 03:25:48 +08:00
|
|
|
FailureOrder =
|
2016-04-19 02:01:49 +08:00
|
|
|
llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
|
2014-03-14 03:25:48 +08:00
|
|
|
}
|
2016-04-19 02:01:49 +08:00
|
|
|
emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
|
2017-08-05 02:16:31 +08:00
|
|
|
FailureOrder, Scope);
|
2014-03-14 03:25:48 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create all the relevant BB's
|
2014-05-21 13:09:00 +08:00
|
|
|
llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
|
|
|
|
*SeqCstBB = nullptr;
|
2014-03-14 03:25:48 +08:00
|
|
|
MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
|
2016-04-07 01:26:42 +08:00
|
|
|
if (SuccessOrder != llvm::AtomicOrdering::Monotonic &&
|
|
|
|
SuccessOrder != llvm::AtomicOrdering::Release)
|
2014-03-14 03:25:48 +08:00
|
|
|
AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
|
2016-04-07 01:26:42 +08:00
|
|
|
if (SuccessOrder == llvm::AtomicOrdering::SequentiallyConsistent)
|
2014-03-14 03:25:48 +08:00
|
|
|
SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
|
|
|
|
|
|
|
|
llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);
|
|
|
|
|
|
|
|
llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
|
|
|
|
|
|
|
|
// Emit all the different atomics
|
|
|
|
|
|
|
|
// MonotonicBB is arbitrarily chosen as the default case; in practice, this
|
|
|
|
// doesn't matter unless someone is crazy enough to use something that
|
|
|
|
// doesn't fold to a constant for the ordering.
|
|
|
|
CGF.Builder.SetInsertPoint(MonotonicBB);
|
2014-06-14 03:43:04 +08:00
|
|
|
emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
|
2017-08-05 02:16:31 +08:00
|
|
|
Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
|
2014-03-14 03:25:48 +08:00
|
|
|
CGF.Builder.CreateBr(ContBB);
|
|
|
|
|
|
|
|
if (AcquireBB) {
|
|
|
|
CGF.Builder.SetInsertPoint(AcquireBB);
|
2014-06-14 03:43:04 +08:00
|
|
|
emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
|
2017-08-05 02:16:31 +08:00
|
|
|
Size, SuccessOrder, llvm::AtomicOrdering::Acquire, Scope);
|
2014-03-14 03:25:48 +08:00
|
|
|
CGF.Builder.CreateBr(ContBB);
|
2016-04-19 02:01:49 +08:00
|
|
|
SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
|
2014-03-14 03:25:48 +08:00
|
|
|
AcquireBB);
|
2016-04-19 02:01:49 +08:00
|
|
|
SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
|
2014-03-14 03:25:48 +08:00
|
|
|
AcquireBB);
|
|
|
|
}
|
|
|
|
if (SeqCstBB) {
|
|
|
|
CGF.Builder.SetInsertPoint(SeqCstBB);
|
2016-04-07 01:26:42 +08:00
|
|
|
emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
|
2017-08-05 02:16:31 +08:00
|
|
|
llvm::AtomicOrdering::SequentiallyConsistent, Scope);
|
2014-03-14 03:25:48 +08:00
|
|
|
CGF.Builder.CreateBr(ContBB);
|
2016-04-19 02:01:49 +08:00
|
|
|
SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
|
2014-03-14 03:25:48 +08:00
|
|
|
SeqCstBB);
|
|
|
|
}
|
|
|
|
|
|
|
|
CGF.Builder.SetInsertPoint(ContBB);
|
|
|
|
}
|
|
|
|
|
2015-09-08 16:05:57 +08:00
|
|
|
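// Emit a single atomic operation once the ordering and synchronization scope
// have been resolved to constants.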
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
|
|
|
|
Address Ptr, Address Val1, Address Val2,
|
2014-06-14 03:43:04 +08:00
|
|
|
llvm::Value *IsWeak, llvm::Value *FailureOrder,
|
2017-08-05 02:16:31 +08:00
|
|
|
uint64_t Size, llvm::AtomicOrdering Order,
|
|
|
|
llvm::SyncScope::ID Scope) {
|
2013-03-08 05:37:12 +08:00
|
|
|
llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
|
|
|
|
llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
|
|
|
|
|
|
|
|
switch (E->getOp()) {
|
|
|
|
case AtomicExpr::AO__c11_atomic_init:
|
2017-08-05 02:16:31 +08:00
|
|
|
case AtomicExpr::AO__opencl_atomic_init:
|
2013-03-08 05:37:12 +08:00
|
|
|
llvm_unreachable("Already handled!");
|
|
|
|
|
|
|
|
case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
|
2017-08-05 02:16:31 +08:00
|
|
|
case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
|
2014-06-14 03:43:04 +08:00
|
|
|
emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
|
2017-08-05 02:16:31 +08:00
|
|
|
FailureOrder, Size, Order, Scope);
|
2014-06-14 03:43:04 +08:00
|
|
|
return;
|
2013-03-08 05:37:12 +08:00
|
|
|
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
|
2017-08-05 02:16:31 +08:00
|
|
|
case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
|
2014-06-14 03:43:04 +08:00
|
|
|
emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
|
2017-08-05 02:16:31 +08:00
|
|
|
FailureOrder, Size, Order, Scope);
|
2014-06-14 03:43:04 +08:00
|
|
|
return;
|
2013-03-08 05:37:12 +08:00
|
|
|
case AtomicExpr::AO__atomic_compare_exchange:
|
2014-06-14 03:43:04 +08:00
|
|
|
case AtomicExpr::AO__atomic_compare_exchange_n: {
|
|
|
|
if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
|
|
|
|
emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
|
2017-08-05 02:16:31 +08:00
|
|
|
Val1, Val2, FailureOrder, Size, Order, Scope);
|
2014-06-14 03:43:04 +08:00
|
|
|
} else {
|
|
|
|
// Create all the relevant BB's
|
|
|
|
llvm::BasicBlock *StrongBB =
|
|
|
|
CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
|
|
|
|
llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
|
|
|
|
llvm::BasicBlock *ContBB =
|
|
|
|
CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
|
|
|
|
|
|
|
|
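// IsWeak is not a compile-time constant here: switch on it at run time and
// emit both the strong and weak variants.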
llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
|
|
|
|
SI->addCase(CGF.Builder.getInt1(false), StrongBB);
|
|
|
|
|
|
|
|
CGF.Builder.SetInsertPoint(StrongBB);
|
|
|
|
emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
|
2017-08-05 02:16:31 +08:00
|
|
|
FailureOrder, Size, Order, Scope);
|
2014-06-14 03:43:04 +08:00
|
|
|
CGF.Builder.CreateBr(ContBB);
|
|
|
|
|
|
|
|
CGF.Builder.SetInsertPoint(WeakBB);
|
|
|
|
emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
|
2017-08-05 02:16:31 +08:00
|
|
|
FailureOrder, Size, Order, Scope);
|
2014-06-14 03:43:04 +08:00
|
|
|
CGF.Builder.CreateBr(ContBB);
|
|
|
|
|
|
|
|
CGF.Builder.SetInsertPoint(ContBB);
|
|
|
|
}
|
2013-03-08 05:37:12 +08:00
|
|
|
return;
|
2014-06-14 03:43:04 +08:00
|
|
|
}
|
2013-03-08 05:37:12 +08:00
|
|
|
case AtomicExpr::AO__c11_atomic_load:
|
2017-08-05 02:16:31 +08:00
|
|
|
case AtomicExpr::AO__opencl_atomic_load:
|
2013-03-08 05:37:12 +08:00
|
|
|
case AtomicExpr::AO__atomic_load_n:
|
|
|
|
case AtomicExpr::AO__atomic_load: {
|
|
|
|
llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
|
2017-08-05 02:16:31 +08:00
|
|
|
Load->setAtomic(Order, Scope);
|
2013-03-08 05:37:12 +08:00
|
|
|
Load->setVolatile(E->isVolatile());
|
2015-09-08 16:05:57 +08:00
|
|
|
CGF.Builder.CreateStore(Load, Dest);
|
2013-03-08 05:37:12 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
case AtomicExpr::AO__c11_atomic_store:
|
2017-08-05 02:16:31 +08:00
|
|
|
case AtomicExpr::AO__opencl_atomic_store:
|
2013-03-08 05:37:12 +08:00
|
|
|
case AtomicExpr::AO__atomic_store:
|
|
|
|
case AtomicExpr::AO__atomic_store_n: {
|
2015-09-08 16:05:57 +08:00
|
|
|
llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
|
2013-03-08 05:37:12 +08:00
|
|
|
llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
|
2017-08-05 02:16:31 +08:00
|
|
|
Store->setAtomic(Order, Scope);
|
2013-03-08 05:37:12 +08:00
|
|
|
Store->setVolatile(E->isVolatile());
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
case AtomicExpr::AO__c11_atomic_exchange:
|
2017-08-05 02:16:31 +08:00
|
|
|
case AtomicExpr::AO__opencl_atomic_exchange:
|
2013-03-08 05:37:12 +08:00
|
|
|
case AtomicExpr::AO__atomic_exchange_n:
|
|
|
|
case AtomicExpr::AO__atomic_exchange:
|
|
|
|
Op = llvm::AtomicRMWInst::Xchg;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case AtomicExpr::AO__atomic_add_fetch:
|
|
|
|
PostOp = llvm::Instruction::Add;
|
2017-12-20 06:06:11 +08:00
|
|
|
LLVM_FALLTHROUGH;
|
2013-03-08 05:37:12 +08:00
|
|
|
case AtomicExpr::AO__c11_atomic_fetch_add:
|
2017-08-05 02:16:31 +08:00
|
|
|
case AtomicExpr::AO__opencl_atomic_fetch_add:
|
2013-03-08 05:37:12 +08:00
|
|
|
case AtomicExpr::AO__atomic_fetch_add:
|
|
|
|
Op = llvm::AtomicRMWInst::Add;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case AtomicExpr::AO__atomic_sub_fetch:
|
|
|
|
PostOp = llvm::Instruction::Sub;
|
2017-12-20 06:06:11 +08:00
|
|
|
LLVM_FALLTHROUGH;
|
2013-03-08 05:37:12 +08:00
|
|
|
case AtomicExpr::AO__c11_atomic_fetch_sub:
|
2017-08-05 02:16:31 +08:00
|
|
|
case AtomicExpr::AO__opencl_atomic_fetch_sub:
|
2013-03-08 05:37:12 +08:00
|
|
|
case AtomicExpr::AO__atomic_fetch_sub:
|
|
|
|
Op = llvm::AtomicRMWInst::Sub;
|
|
|
|
break;
|
|
|
|
|
2017-08-05 02:16:31 +08:00
|
|
|
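// OpenCL min/max select the signed or unsigned RMW operation based on the
// value type.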
case AtomicExpr::AO__opencl_atomic_fetch_min:
|
|
|
|
Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Min
|
|
|
|
: llvm::AtomicRMWInst::UMin;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case AtomicExpr::AO__opencl_atomic_fetch_max:
|
|
|
|
Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Max
|
|
|
|
: llvm::AtomicRMWInst::UMax;
|
|
|
|
break;
|
|
|
|
|
2013-03-08 05:37:12 +08:00
|
|
|
case AtomicExpr::AO__atomic_and_fetch:
|
|
|
|
PostOp = llvm::Instruction::And;
|
2017-12-20 06:06:11 +08:00
|
|
|
LLVM_FALLTHROUGH;
|
2013-03-08 05:37:12 +08:00
|
|
|
case AtomicExpr::AO__c11_atomic_fetch_and:
|
2017-08-05 02:16:31 +08:00
|
|
|
case AtomicExpr::AO__opencl_atomic_fetch_and:
|
2013-03-08 05:37:12 +08:00
|
|
|
case AtomicExpr::AO__atomic_fetch_and:
|
|
|
|
Op = llvm::AtomicRMWInst::And;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case AtomicExpr::AO__atomic_or_fetch:
|
|
|
|
PostOp = llvm::Instruction::Or;
|
2017-12-20 06:06:11 +08:00
|
|
|
LLVM_FALLTHROUGH;
|
2013-03-08 05:37:12 +08:00
|
|
|
case AtomicExpr::AO__c11_atomic_fetch_or:
|
2017-08-05 02:16:31 +08:00
|
|
|
case AtomicExpr::AO__opencl_atomic_fetch_or:
|
2013-03-08 05:37:12 +08:00
|
|
|
case AtomicExpr::AO__atomic_fetch_or:
|
|
|
|
Op = llvm::AtomicRMWInst::Or;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case AtomicExpr::AO__atomic_xor_fetch:
|
|
|
|
PostOp = llvm::Instruction::Xor;
|
2017-12-20 06:06:11 +08:00
|
|
|
LLVM_FALLTHROUGH;
|
2013-03-08 05:37:12 +08:00
|
|
|
case AtomicExpr::AO__c11_atomic_fetch_xor:
|
2017-08-05 02:16:31 +08:00
|
|
|
case AtomicExpr::AO__opencl_atomic_fetch_xor:
|
2013-03-08 05:37:12 +08:00
|
|
|
case AtomicExpr::AO__atomic_fetch_xor:
|
|
|
|
Op = llvm::AtomicRMWInst::Xor;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case AtomicExpr::AO__atomic_nand_fetch:
|
2015-11-13 02:37:29 +08:00
|
|
|
PostOp = llvm::Instruction::And; // the NOT is special cased below
|
2017-12-20 06:06:11 +08:00
|
|
|
LLVM_FALLTHROUGH;
|
2013-03-08 05:37:12 +08:00
|
|
|
case AtomicExpr::AO__atomic_fetch_nand:
|
|
|
|
Op = llvm::AtomicRMWInst::Nand;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2015-09-08 16:05:57 +08:00
|
|
|
llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
|
2013-03-08 05:37:12 +08:00
|
|
|
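// The remaining operations are all lowered to a single atomicrmw instruction.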
llvm::AtomicRMWInst *RMWI =
|
2017-08-05 02:16:31 +08:00
|
|
|
CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order, Scope);
|
2013-03-08 05:37:12 +08:00
|
|
|
RMWI->setVolatile(E->isVolatile());
|
|
|
|
|
|
|
|
// For __atomic_*_fetch operations, perform the operation again to
|
|
|
|
// determine the value which was written.
|
|
|
|
llvm::Value *Result = RMWI;
|
|
|
|
if (PostOp)
|
|
|
|
Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
|
|
|
|
if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
|
|
|
|
Result = CGF.Builder.CreateNot(Result);
|
2015-09-08 16:05:57 +08:00
|
|
|
CGF.Builder.CreateStore(Result, Dest);
|
2013-03-08 05:37:12 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// This function emits any expression (scalar, complex, or aggregate)
|
|
|
|
// into a temporary alloca.
|
2015-09-08 16:05:57 +08:00
|
|
|
static Address
|
2013-03-08 05:37:12 +08:00
|
|
|
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
|
2015-09-08 16:05:57 +08:00
|
|
|
Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
|
2013-03-08 05:37:12 +08:00
|
|
|
CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
|
|
|
|
/*Init*/ true);
|
|
|
|
return DeclPtr;
|
|
|
|
}
|
|
|
|
|
2017-08-16 00:02:49 +08:00
|
|
|
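// For illustration (editorial sketch, not part of the original sources): this
// helper is how by-value operands of the builtins reach the lowering below.
// Given, say, __atomic_fetch_or(&x, 5, __ATOMIC_SEQ_CST), the value 5 is
// evaluated into a fresh ".atomictmp" alloca and the atomic operation is then
// emitted against that temporary's address.
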
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
                         Address Ptr, Address Val1, Address Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, llvm::AtomicOrdering Order,
                         llvm::Value *Scope) {
  auto ScopeModel = Expr->getScopeModel();

  // LLVM atomic instructions always have synch scope. If clang atomic
  // expression has no scope operand, use default LLVM synch scope.
  if (!ScopeModel) {
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                 Order, CGF.CGM.getLLVMContext().getOrInsertSyncScopeID(""));
    return;
  }

  // Handle constant scope.
  if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
    auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
        ScopeModel->map(SC->getZExtValue()), CGF.CGM.getLLVMContext());
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                 Order, SCID);
    return;
  }

  // Handle non-constant scope.
  auto &Builder = CGF.Builder;
  auto Scopes = ScopeModel->getRuntimeValues();
  llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
  for (auto S : Scopes)
    BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);

  llvm::BasicBlock *ContBB =
      CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);

  auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
  // If unsupported synch scope is encountered at run time, assume a fallback
  // synch scope value.
  auto FallBack = ScopeModel->getFallBackValue();
  llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
  for (auto S : Scopes) {
    auto *B = BB[S];
    if (S != FallBack)
      SI->addCase(Builder.getInt32(S), B);

    Builder.SetInsertPoint(B);
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                 Order,
                 CGF.getTargetHooks().getLLVMSyncScopeID(ScopeModel->map(S),
                                                         CGF.getLLVMContext()));
    Builder.CreateBr(ContBB);
  }

  Builder.SetInsertPoint(ContBB);
}

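// Rough sketch (editorial, names and numbering illustrative only) of the IR
// the non-constant-scope path above produces for an OpenCL atomic whose
// memory scope is only known at run time:
//
//   switch i32 %scope, label %fallback.scope [
//     i32 1, label %workgroup.scope
//     i32 2, label %device.scope
//     ...
//   ]
//
// Each destination block re-emits the same atomic operation with the sync
// scope the target maps that value to, then branches to
// "atomic.scope.continue".
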
static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
                  SourceLocation Loc, CharUnits SizeInChars) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    CharUnits Align = CGF.getContext().getTypeAlignInChars(ValTy);
    int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
    ValTy =
        CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
    llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
                                                SizeInBits)->getPointerTo();
    Address Ptr = Address(CGF.Builder.CreateBitCast(Val, IPtrTy), Align);
    Val = CGF.EmitLoadOfScalar(Ptr, false,
                               CGF.getContext().getPointerType(ValTy),
                               Loc);
    // Coerce the value into an appropriately sized integer type.
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
             CGF.getContext().VoidPtrTy);
  }
}

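// For illustration (editorial): with UseOptimizedLibcall, the value operand
// of, say, a 4-byte __atomic_exchange_4 is loaded as a 32-bit integer and
// passed by value; without it, the generic __atomic_exchange simply receives
// the operand's address as a void pointer.
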
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;

  Address Val1 = Address::invalid();
  Address Val2 = Address::invalid();
  Address Dest = Address::invalid();
  Address Ptr = EmitPointerWithAlignment(E->getPtr());

  CharUnits sizeChars, alignChars;
  std::tie(sizeChars, alignChars) = getContext().getTypeInfoInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
  bool UseLibcall = ((Ptr.getAlignment() % sizeChars) != 0 ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);
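  // For example (illustrative): an atomic object whose size exceeds the
  // target's maximum inline atomic width, or one whose known alignment is not
  // a multiple of its size (say, an 8-byte atomic only aligned to 4 bytes),
  // is lowered through the __atomic_* libcall path below rather than inline
  // atomic instructions.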

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
      E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
    LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  llvm::Value *Order = EmitScalarExpr(E->getOrder());
  llvm::Value *Scope =
      E->getScopeModel() ? EmitScalarExpr(E->getScope()) : nullptr;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled above with EmitAtomicInit!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    Dest = EmitPointerWithAlignment(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitPointerWithAlignment(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
        E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      IsWeak = EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
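      // For example, with _Atomic(int *) P:
      //   __c11_atomic_fetch_add(&P, 2, __ATOMIC_SEQ_CST) advances P by
      //   2 * sizeof(int) bytes, so the operand is scaled here, while
      //   __atomic_fetch_add(&P, 2, __ATOMIC_SEQ_CST) advances P by 2 bytes,
      //   with the caller expected to do the scaling itself.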
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
      Val1 = Temp;
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
      break;
    }
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  QualType RValTy = E->getType().getUnqualifiedType();

  // The inlined atomics only function on iN types, where N is a power of 2. We
  // need to make sure (via temporaries if necessary) that all incoming values
  // are compatible.
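  // E.g. (illustrative): an _Atomic(float) is loaded and stored through an
  // i32-typed pointer, and types whose value representation is narrower than
  // the atomic representation are padded out to it; that is what the casts
  // just below arrange.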
  LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
  AtomicInfo Atomics(*this, AtomicVal);

  Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
  if (Val1.isValid()) Val1 = Atomics.convertToAtomicIntPointer(Val1);
  if (Val2.isValid()) Val2 = Atomics.convertToAtomicIntPointer(Val2);
  if (Dest.isValid())
    Dest = Atomics.emitCastToAtomicIntPointer(Dest);
  else if (E->isCmpXChg())
    Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
  else if (!RValTy->isVoidType())
    Dest = Atomics.emitCastToAtomicIntPointer(Atomics.CreateTempAlloca());

  // Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_init:
    case AtomicExpr::AO__opencl_atomic_init:
      llvm_unreachable("Already handled above with EmitAtomicInit!");

    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__opencl_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__opencl_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__opencl_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_nand:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__opencl_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__opencl_atomic_fetch_xor:
    case AtomicExpr::AO__opencl_atomic_fetch_min:
    case AtomicExpr::AO__opencl_atomic_fetch_max:
    case AtomicExpr::AO__atomic_fetch_xor:
    case AtomicExpr::AO__atomic_add_fetch:
    case AtomicExpr::AO__atomic_and_fetch:
    case AtomicExpr::AO__atomic_nand_fetch:
    case AtomicExpr::AO__atomic_or_fetch:
    case AtomicExpr::AO__atomic_sub_fetch:
    case AtomicExpr::AO__atomic_xor_fetch:
      // For these, only library calls for certain sizes exist.
      UseOptimizedLibcall = true;
      break;

    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__opencl_atomic_load:
    case AtomicExpr::AO__opencl_atomic_store:
    case AtomicExpr::AO__opencl_atomic_exchange:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_load_n:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_store_n:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
    case AtomicExpr::AO__atomic_compare_exchange:
      // Only use optimized library calls for sizes for which they exist.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }
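
    // For example (illustrative): a 4-byte __atomic_exchange_n lowers to the
    // optimized __atomic_exchange_4(ptr, val, order) form, while an exchange
    // of an odd-sized type falls back to the generic
    // __atomic_exchange(size, ptr, val, ret, order) form built up below.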
    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // Atomic address is the first or second parameter
    // The OpenCL atomic library functions only accept pointer arguments to
    // generic address space.
    auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
      if (!E->isOpenCL())
        return V;
      auto AS = PT->getAs<PointerType>()->getPointeeType().getAddressSpace();
      if (AS == LangAS::opencl_generic)
        return V;
      auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
      auto T = V->getType();
      auto *DestType = T->getPointerElementType()->getPointerTo(DestAS);

      return getTargetHooks().performAddrSpaceCast(
          *this, V, AS, LangAS::opencl_generic, DestType, false);
    };
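
    // E.g. (illustrative): for an OpenCL atomic on a pointer to the __global
    // address space, the pointer argument is address-space-cast to the
    // generic address space here before being handed to the __atomic_*
    // library function, which only takes generic pointers.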
    Args.add(RValue::get(CastToGenericAddrSpace(
                 EmitCastToVoidPtr(Ptr.getPointer()), E->getPtr()->getType())),
             getContext().VoidPtrTy);

    std::string LibCallName;
    QualType LoweredMemTy =
        MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
    QualType RetTy;
    bool HaveRetTy = false;
    llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_init:
    case AtomicExpr::AO__opencl_atomic_init:
      llvm_unreachable("Already handled!");

    // There is only one libcall for compare and exchange, because there is no
    // optimisation benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
    //                                  int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(
          RValue::get(CastToGenericAddrSpace(
              EmitCastToVoidPtr(Val1.getPointer()), E->getVal1()->getType())),
          getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2.getPointer(),
                        MemTy, E->getExprLoc(), sizeChars);
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__opencl_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), sizeChars);
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__opencl_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), sizeChars);
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__opencl_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_add_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_add_fetch:
      PostOp = llvm::Instruction::Add;
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__opencl_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
2015-09-08 16:05:57 +08:00
|
|
|
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
|
|
|
|
LoweredMemTy, E->getExprLoc(), sizeChars);
|
2013-06-01 03:27:59 +08:00
|
|
|
break;
|
2015-11-13 02:37:29 +08:00
|
|
|
// T __atomic_and_fetch_N(T *mem, T val, int order)
|
2013-06-01 03:27:59 +08:00
|
|
|
// T __atomic_fetch_and_N(T *mem, T val, int order)
|
2015-11-13 02:37:29 +08:00
|
|
|
case AtomicExpr::AO__atomic_and_fetch:
|
|
|
|
PostOp = llvm::Instruction::And;
|
2017-12-20 06:06:11 +08:00
|
|
|
LLVM_FALLTHROUGH;
|
2013-06-01 03:27:59 +08:00
|
|
|
case AtomicExpr::AO__c11_atomic_fetch_and:
|
2017-08-05 02:16:31 +08:00
|
|
|
case AtomicExpr::AO__opencl_atomic_fetch_and:
|
2013-06-01 03:27:59 +08:00
|
|
|
case AtomicExpr::AO__atomic_fetch_and:
|
|
|
|
LibCallName = "__atomic_fetch_and";
|
2015-09-08 16:05:57 +08:00
|
|
|
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
|
|
|
|
MemTy, E->getExprLoc(), sizeChars);
|
2013-06-01 03:27:59 +08:00
|
|
|
break;
|
2015-11-13 02:37:29 +08:00
|
|
|
// T __atomic_or_fetch_N(T *mem, T val, int order)
|
2013-06-01 03:27:59 +08:00
|
|
|
// T __atomic_fetch_or_N(T *mem, T val, int order)
|
2015-11-13 02:37:29 +08:00
|
|
|
case AtomicExpr::AO__atomic_or_fetch:
|
|
|
|
PostOp = llvm::Instruction::Or;
|
2017-12-20 06:06:11 +08:00
|
|
|
LLVM_FALLTHROUGH;
|
2013-06-01 03:27:59 +08:00
|
|
|
case AtomicExpr::AO__c11_atomic_fetch_or:
|
2017-08-05 02:16:31 +08:00
|
|
|
case AtomicExpr::AO__opencl_atomic_fetch_or:
|
2013-06-01 03:27:59 +08:00
|
|
|
case AtomicExpr::AO__atomic_fetch_or:
|
|
|
|
LibCallName = "__atomic_fetch_or";
|
2015-09-08 16:05:57 +08:00
|
|
|
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
|
|
|
|
MemTy, E->getExprLoc(), sizeChars);
|
2013-06-01 03:27:59 +08:00
|
|
|
break;
|
2015-11-13 02:37:29 +08:00
|
|
|
// T __atomic_sub_fetch_N(T *mem, T val, int order)
|
2013-06-01 03:27:59 +08:00
|
|
|
// T __atomic_fetch_sub_N(T *mem, T val, int order)
|
2015-11-13 02:37:29 +08:00
|
|
|
case AtomicExpr::AO__atomic_sub_fetch:
|
|
|
|
PostOp = llvm::Instruction::Sub;
|
2017-12-20 06:06:11 +08:00
|
|
|
LLVM_FALLTHROUGH;
|
2013-06-01 03:27:59 +08:00
|
|
|
case AtomicExpr::AO__c11_atomic_fetch_sub:
|
2017-08-05 02:16:31 +08:00
|
|
|
case AtomicExpr::AO__opencl_atomic_fetch_sub:
|
2013-06-01 03:27:59 +08:00
|
|
|
case AtomicExpr::AO__atomic_fetch_sub:
|
|
|
|
LibCallName = "__atomic_fetch_sub";
|
2015-09-08 16:05:57 +08:00
|
|
|
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
|
|
|
|
LoweredMemTy, E->getExprLoc(), sizeChars);
|
2013-06-01 03:27:59 +08:00
|
|
|
break;
|
2015-11-13 02:37:29 +08:00
|
|
|
// T __atomic_xor_fetch_N(T *mem, T val, int order)
|
2013-06-01 03:27:59 +08:00
|
|
|
// T __atomic_fetch_xor_N(T *mem, T val, int order)
|
2015-11-13 02:37:29 +08:00
|
|
|
case AtomicExpr::AO__atomic_xor_fetch:
|
|
|
|
PostOp = llvm::Instruction::Xor;
|
2017-12-20 06:06:11 +08:00
|
|
|
LLVM_FALLTHROUGH;
|
2013-06-01 03:27:59 +08:00
|
|
|
case AtomicExpr::AO__c11_atomic_fetch_xor:
|
2017-08-05 02:16:31 +08:00
|
|
|
case AtomicExpr::AO__opencl_atomic_fetch_xor:
|
2013-06-01 03:27:59 +08:00
|
|
|
case AtomicExpr::AO__atomic_fetch_xor:
|
|
|
|
LibCallName = "__atomic_fetch_xor";
|
2015-09-08 16:05:57 +08:00
|
|
|
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
|
|
|
|
MemTy, E->getExprLoc(), sizeChars);
|
2013-03-08 05:37:12 +08:00
|
|
|
break;
|
2017-08-05 02:16:31 +08:00
|
|
|
case AtomicExpr::AO__opencl_atomic_fetch_min:
|
|
|
|
LibCallName = E->getValueType()->isSignedIntegerType()
|
|
|
|
? "__atomic_fetch_min"
|
|
|
|
: "__atomic_fetch_umin";
|
|
|
|
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
|
|
|
|
LoweredMemTy, E->getExprLoc(), sizeChars);
|
|
|
|
break;
|
|
|
|
case AtomicExpr::AO__opencl_atomic_fetch_max:
|
|
|
|
LibCallName = E->getValueType()->isSignedIntegerType()
|
|
|
|
? "__atomic_fetch_max"
|
|
|
|
: "__atomic_fetch_umax";
|
|
|
|
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
|
|
|
|
LoweredMemTy, E->getExprLoc(), sizeChars);
|
|
|
|
break;
|
2015-11-13 02:37:29 +08:00
|
|
|
// T __atomic_nand_fetch_N(T *mem, T val, int order)
|
2015-08-06 00:57:36 +08:00
|
|
|
// T __atomic_fetch_nand_N(T *mem, T val, int order)
|
2015-11-13 02:37:29 +08:00
|
|
|
case AtomicExpr::AO__atomic_nand_fetch:
|
|
|
|
PostOp = llvm::Instruction::And; // the NOT is special cased below
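// __atomic_nand_fetch must return ~(old & val): the libcall yields the old
// value, the And recomputes old & val, and the Not applied later finishes it.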
|
2017-12-20 06:06:11 +08:00
|
|
|
LLVM_FALLTHROUGH;
|
2015-08-06 00:57:36 +08:00
|
|
|
case AtomicExpr::AO__atomic_fetch_nand:
|
|
|
|
LibCallName = "__atomic_fetch_nand";
|
2015-09-08 16:05:57 +08:00
|
|
|
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
|
|
|
|
MemTy, E->getExprLoc(), sizeChars);
|
2015-08-06 00:57:36 +08:00
|
|
|
break;
|
2013-03-08 05:37:12 +08:00
|
|
|
}
|
2013-06-01 03:27:59 +08:00
|
|
|
|
2017-08-05 02:16:31 +08:00
|
|
|
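// For OpenCL builtins, redirect to the OpenCL runtime entry point by renaming
// the call, e.g. "__atomic_fetch_add" becomes "__opencl_atomic_fetch_add".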
if (E->isOpenCL()) {
|
|
|
|
LibCallName = std::string("__opencl") +
|
|
|
|
StringRef(LibCallName).drop_front(1).str();
|
|
|
|
|
|
|
|
}
|
2013-06-01 03:27:59 +08:00
|
|
|
// Optimized functions have the size in their name.
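// e.g. a 4-byte __atomic_fetch_add becomes "__atomic_fetch_add_4".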
|
|
|
|
if (UseOptimizedLibcall)
|
|
|
|
LibCallName += "_" + llvm::utostr(Size);
|
|
|
|
// By default, assume we return a value of the atomic type.
|
|
|
|
if (!HaveRetTy) {
|
|
|
|
if (UseOptimizedLibcall) {
|
|
|
|
// Value is returned directly.
|
2014-08-29 15:27:49 +08:00
|
|
|
// The function returns an appropriately sized integer type.
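// e.g. a 4-byte operation is modeled as returning an unsigned 32-bit integer.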
|
|
|
|
RetTy = getContext().getIntTypeForBitwidth(
|
|
|
|
getContext().toBits(sizeChars), /*Signed=*/false);
|
2013-06-01 03:27:59 +08:00
|
|
|
} else {
|
|
|
|
// The value is returned through an out-parameter placed before the order argument.
|
|
|
|
RetTy = getContext().VoidTy;
|
2015-09-08 16:05:57 +08:00
|
|
|
Args.add(RValue::get(EmitCastToVoidPtr(Dest.getPointer())),
|
|
|
|
getContext().VoidPtrTy);
|
2013-06-01 03:27:59 +08:00
|
|
|
}
|
|
|
|
}
|
2013-03-08 05:37:12 +08:00
|
|
|
// The memory order is always the last parameter, except for OpenCL atomics,
// where the synchronization scope follows it.
|
|
|
|
Args.add(RValue::get(Order),
|
|
|
|
getContext().IntTy);
|
2017-08-05 02:16:31 +08:00
|
|
|
if (E->isOpenCL())
|
|
|
|
Args.add(RValue::get(Scope), getContext().IntTy);
|
2013-03-08 05:37:12 +08:00
|
|
|
|
2015-11-13 02:37:29 +08:00
|
|
|
// PostOp is only needed for the atomic_*_fetch operations, and
|
|
|
|
// thus is only needed for and implemented in the
|
|
|
|
// UseOptimizedLibcall codepath.
|
|
|
|
assert(UseOptimizedLibcall || !PostOp);
|
|
|
|
|
2014-11-26 07:44:32 +08:00
|
|
|
RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
|
|
|
|
// The value is returned directly from the libcall.
|
2015-11-10 03:56:35 +08:00
|
|
|
if (E->isCmpXChg())
|
2014-11-26 07:44:32 +08:00
|
|
|
return Res;
|
2015-11-10 03:56:35 +08:00
|
|
|
|
|
|
|
// The value is returned directly for optimized libcalls but the expr
|
|
|
|
// provided an out-param.
|
|
|
|
if (UseOptimizedLibcall && Res.getScalarVal()) {
|
2014-11-26 07:44:32 +08:00
|
|
|
llvm::Value *ResVal = Res.getScalarVal();
|
2015-11-13 02:37:29 +08:00
|
|
|
if (PostOp) {
|
2018-03-10 09:47:22 +08:00
|
|
|
llvm::Value *LoadVal1 = Args[1].RV.getScalarVal();
|
2015-11-13 02:37:29 +08:00
|
|
|
ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
|
|
|
|
}
|
|
|
|
if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
|
|
|
|
ResVal = Builder.CreateNot(ResVal);
|
|
|
|
|
2015-11-10 03:56:35 +08:00
|
|
|
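// Write the recomputed *_fetch result back into the destination temporary so
// the generic return path below picks it up.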
Builder.CreateStore(
|
|
|
|
ResVal,
|
|
|
|
Builder.CreateBitCast(Dest, ResVal->getType()->getPointerTo()));
|
2014-11-26 07:44:32 +08:00
|
|
|
}
|
2015-11-10 03:56:35 +08:00
|
|
|
|
|
|
|
if (RValTy->isVoidType())
|
|
|
|
return RValue::get(nullptr);
|
|
|
|
|
|
|
|
return convertTempToRValue(
|
|
|
|
Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
|
|
|
|
RValTy, E->getExprLoc());
|
2013-03-08 05:37:12 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
|
2017-08-05 02:16:31 +08:00
|
|
|
E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
|
2013-03-08 05:37:12 +08:00
|
|
|
E->getOp() == AtomicExpr::AO__atomic_store ||
|
|
|
|
E->getOp() == AtomicExpr::AO__atomic_store_n;
|
|
|
|
bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
|
2017-08-05 02:16:31 +08:00
|
|
|
E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
|
2013-03-08 05:37:12 +08:00
|
|
|
E->getOp() == AtomicExpr::AO__atomic_load ||
|
|
|
|
E->getOp() == AtomicExpr::AO__atomic_load_n;
|
|
|
|
|
|
|
|
if (isa<llvm::ConstantInt>(Order)) {
|
2016-04-19 02:01:49 +08:00
|
|
|
auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
|
|
|
|
// We should not ever get to a case where the ordering isn't a valid C ABI
|
|
|
|
// value, but it's hard to enforce that in general.
|
|
|
|
if (llvm::isValidAtomicOrderingCABI(ord))
|
|
|
|
switch ((llvm::AtomicOrderingCABI)ord) {
|
|
|
|
case llvm::AtomicOrderingCABI::relaxed:
|
|
|
|
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
|
2017-08-16 00:02:49 +08:00
|
|
|
llvm::AtomicOrdering::Monotonic, Scope);
|
2016-04-19 02:01:49 +08:00
|
|
|
break;
|
|
|
|
case llvm::AtomicOrderingCABI::consume:
|
|
|
|
case llvm::AtomicOrderingCABI::acquire:
|
|
|
|
if (IsStore)
|
|
|
|
break; // Avoid crashing on code with undefined behavior
|
|
|
|
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
|
2017-08-16 00:02:49 +08:00
|
|
|
llvm::AtomicOrdering::Acquire, Scope);
|
2016-04-19 02:01:49 +08:00
|
|
|
break;
|
|
|
|
case llvm::AtomicOrderingCABI::release:
|
|
|
|
if (IsLoad)
|
|
|
|
break; // Avoid crashing on code with undefined behavior
|
|
|
|
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
|
2017-08-16 00:02:49 +08:00
|
|
|
llvm::AtomicOrdering::Release, Scope);
|
2016-04-19 02:01:49 +08:00
|
|
|
break;
|
|
|
|
case llvm::AtomicOrderingCABI::acq_rel:
|
|
|
|
if (IsLoad || IsStore)
|
|
|
|
break; // Avoid crashing on code with undefined behavior
|
|
|
|
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
|
2017-08-16 00:02:49 +08:00
|
|
|
llvm::AtomicOrdering::AcquireRelease, Scope);
|
2016-04-19 02:01:49 +08:00
|
|
|
break;
|
|
|
|
case llvm::AtomicOrderingCABI::seq_cst:
|
|
|
|
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
|
2017-08-16 00:02:49 +08:00
|
|
|
llvm::AtomicOrdering::SequentiallyConsistent, Scope);
|
2016-04-19 02:01:49 +08:00
|
|
|
break;
|
|
|
|
}
|
2014-12-12 16:16:09 +08:00
|
|
|
if (RValTy->isVoidType())
|
2014-05-21 13:09:00 +08:00
|
|
|
return RValue::get(nullptr);
|
2015-11-10 03:56:35 +08:00
|
|
|
|
|
|
|
return convertTempToRValue(
|
2017-10-17 22:19:29 +08:00
|
|
|
Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
|
|
|
|
Dest.getAddressSpace())),
|
2015-11-10 03:56:35 +08:00
|
|
|
RValTy, E->getExprLoc());
|
2013-03-08 05:37:12 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Long case, when Order isn't obviously constant.
|
|
|
|
|
|
|
|
// Create all the relevant BB's
|
2014-05-21 13:09:00 +08:00
|
|
|
llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
|
|
|
|
*ReleaseBB = nullptr, *AcqRelBB = nullptr,
|
|
|
|
*SeqCstBB = nullptr;
|
2013-03-08 05:37:12 +08:00
|
|
|
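// Only create the blocks that are legal for this operation: no acquire block
// for stores, and no release block for loads.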
MonotonicBB = createBasicBlock("monotonic", CurFn);
|
|
|
|
if (!IsStore)
|
|
|
|
AcquireBB = createBasicBlock("acquire", CurFn);
|
|
|
|
if (!IsLoad)
|
|
|
|
ReleaseBB = createBasicBlock("release", CurFn);
|
|
|
|
if (!IsLoad && !IsStore)
|
|
|
|
AcqRelBB = createBasicBlock("acqrel", CurFn);
|
|
|
|
SeqCstBB = createBasicBlock("seqcst", CurFn);
|
|
|
|
llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
|
|
|
|
|
|
|
|
// Create the switch for the split
|
|
|
|
// MonotonicBB is arbitrarily chosen as the default case; in practice, this
|
|
|
|
// doesn't matter unless someone is crazy enough to use something that
|
|
|
|
// doesn't fold to a constant for the ordering.
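// Orderings that are invalid for this operation get no case of their own and
// therefore fall back to the monotonic default.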
|
|
|
|
Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
|
|
|
|
llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
|
|
|
|
|
|
|
|
// Emit all the different atomics
|
|
|
|
Builder.SetInsertPoint(MonotonicBB);
|
2017-08-05 02:16:31 +08:00
|
|
|
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
|
2017-08-16 00:02:49 +08:00
|
|
|
llvm::AtomicOrdering::Monotonic, Scope);
|
2013-03-08 05:37:12 +08:00
|
|
|
Builder.CreateBr(ContBB);
|
|
|
|
if (!IsStore) {
|
|
|
|
Builder.SetInsertPoint(AcquireBB);
|
2017-08-05 02:16:31 +08:00
|
|
|
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
|
2017-08-16 00:02:49 +08:00
|
|
|
llvm::AtomicOrdering::Acquire, Scope);
|
2013-03-08 05:37:12 +08:00
|
|
|
Builder.CreateBr(ContBB);
|
2016-04-19 02:01:49 +08:00
|
|
|
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
|
2014-03-14 03:25:52 +08:00
|
|
|
AcquireBB);
|
2016-04-19 02:01:49 +08:00
|
|
|
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
|
2014-03-14 03:25:52 +08:00
|
|
|
AcquireBB);
|
2013-03-08 05:37:12 +08:00
|
|
|
}
|
|
|
|
if (!IsLoad) {
|
|
|
|
Builder.SetInsertPoint(ReleaseBB);
|
2017-08-05 02:16:31 +08:00
|
|
|
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
|
2017-08-16 00:02:49 +08:00
|
|
|
llvm::AtomicOrdering::Release, Scope);
|
2013-03-08 05:37:12 +08:00
|
|
|
Builder.CreateBr(ContBB);
|
2016-04-19 02:01:49 +08:00
|
|
|
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
|
2014-03-14 03:25:52 +08:00
|
|
|
ReleaseBB);
|
2013-03-08 05:37:12 +08:00
|
|
|
}
|
|
|
|
if (!IsLoad && !IsStore) {
|
|
|
|
Builder.SetInsertPoint(AcqRelBB);
|
2017-08-05 02:16:31 +08:00
|
|
|
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
|
2017-08-16 00:02:49 +08:00
|
|
|
llvm::AtomicOrdering::AcquireRelease, Scope);
|
2013-03-08 05:37:12 +08:00
|
|
|
Builder.CreateBr(ContBB);
|
2016-04-19 02:01:49 +08:00
|
|
|
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
|
2014-03-14 03:25:52 +08:00
|
|
|
AcqRelBB);
|
2013-03-08 05:37:12 +08:00
|
|
|
}
|
|
|
|
Builder.SetInsertPoint(SeqCstBB);
|
2017-08-05 02:16:31 +08:00
|
|
|
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
|
2017-08-16 00:02:49 +08:00
|
|
|
llvm::AtomicOrdering::SequentiallyConsistent, Scope);
|
2013-03-08 05:37:12 +08:00
|
|
|
Builder.CreateBr(ContBB);
|
2016-04-19 02:01:49 +08:00
|
|
|
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
|
2014-03-14 03:25:52 +08:00
|
|
|
SeqCstBB);
|
2013-03-08 05:37:12 +08:00
|
|
|
|
|
|
|
// Cleanup and return
|
|
|
|
Builder.SetInsertPoint(ContBB);
|
2014-12-12 16:16:09 +08:00
|
|
|
if (RValTy->isVoidType())
|
2014-05-21 13:09:00 +08:00
|
|
|
return RValue::get(nullptr);
|
2015-11-10 03:56:35 +08:00
|
|
|
|
|
|
|
assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
|
|
|
|
return convertTempToRValue(
|
2017-10-17 22:19:29 +08:00
|
|
|
Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
|
|
|
|
Dest.getAddressSpace())),
|
2015-11-10 03:56:35 +08:00
|
|
|
RValTy, E->getExprLoc());
|
2013-03-08 05:37:12 +08:00
|
|
|
}
|
2013-03-08 05:37:17 +08:00
|
|
|
|
2015-09-08 16:05:57 +08:00
|
|
|
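// Cast the given address to a pointer to an integer exactly AtomicSizeInBits
// wide, preserving the original address space.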
Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
|
2013-03-08 05:37:17 +08:00
|
|
|
unsigned addrspace =
|
2015-09-08 16:05:57 +08:00
|
|
|
cast<llvm::PointerType>(addr.getPointer()->getType())->getAddressSpace();
|
2013-03-08 05:37:17 +08:00
|
|
|
llvm::IntegerType *ty =
|
|
|
|
llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
|
|
|
|
return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
|
|
|
|
}
|
|
|
|
|
2015-11-10 03:56:35 +08:00
|
|
|
Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
|
|
|
|
llvm::Type *Ty = Addr.getElementType();
|
|
|
|
uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
|
|
|
|
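// If the source is narrower than the atomic width, stage it through a
// temporary of the full atomic size before reinterpreting it as an integer
// pointer.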
if (SourceSizeInBits != AtomicSizeInBits) {
|
|
|
|
Address Tmp = CreateTempAlloca();
|
|
|
|
CGF.Builder.CreateMemCpy(Tmp, Addr,
|
|
|
|
std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
|
|
|
|
Addr = Tmp;
|
|
|
|
}
|
|
|
|
|
|
|
|
return emitCastToAtomicIntPointer(Addr);
|
|
|
|
}
|
|
|
|
|
2015-09-08 16:05:57 +08:00
|
|
|
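// Convert a temporary holding the atomic's bits into an r-value: for simple
// lvalues the padding is stripped and the value type is returned; for other
// lvalue kinds the temporary is either returned whole (when !asValue) or
// reloaded through the original bit-field/vector lvalue.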
RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
|
|
|
|
AggValueSlot resultSlot,
|
|
|
|
SourceLocation loc,
|
|
|
|
bool asValue) const {
|
2015-01-22 14:17:56 +08:00
|
|
|
if (LVal.isSimple()) {
|
|
|
|
if (EvaluationKind == TEK_Aggregate)
|
|
|
|
return resultSlot.asRValue();
|
|
|
|
|
|
|
|
// Drill into the padding structure if we have one.
|
|
|
|
if (hasPadding())
|
2015-09-08 16:05:57 +08:00
|
|
|
addr = CGF.Builder.CreateStructGEP(addr, 0, CharUnits());
|
2015-01-22 14:17:56 +08:00
|
|
|
|
|
|
|
// Otherwise, just convert the temporary to an r-value using the
|
|
|
|
// normal conversion routine.
|
|
|
|
return CGF.convertTempToRValue(addr, getValueType(), loc);
|
2015-04-06 06:45:47 +08:00
|
|
|
}
|
2015-09-08 16:05:57 +08:00
|
|
|
if (!asValue)
|
2015-02-27 14:33:30 +08:00
|
|
|
// For non-simple lvalues, return the temporary's contents as the full atomic type.
|
2015-09-08 16:05:57 +08:00
|
|
|
return RValue::get(CGF.Builder.CreateLoad(addr));
|
2015-04-06 06:45:47 +08:00
|
|
|
if (LVal.isBitField())
|
2015-09-08 16:05:57 +08:00
|
|
|
return CGF.EmitLoadOfBitfieldLValue(
|
|
|
|
LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
|
2017-10-17 18:17:43 +08:00
|
|
|
LVal.getBaseInfo(), TBAAAccessInfo()), loc);
|
2015-04-06 06:45:47 +08:00
|
|
|
if (LVal.isVectorElt())
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
return CGF.EmitLoadOfLValue(
|
|
|
|
LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
|
2017-10-17 18:17:43 +08:00
|
|
|
LVal.getBaseInfo(), TBAAAccessInfo()), loc);
|
2015-01-22 14:17:56 +08:00
|
|
|
assert(LVal.isExtVectorElt());
|
|
|
|
return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
|
Compute and preserve alignment more faithfully in IR-generation.
Introduce an Address type to bundle a pointer value with an
alignment. Introduce APIs on CGBuilderTy to work with Address
values. Change core APIs on CGF/CGM to traffic in Address where
appropriate. Require alignments to be non-zero. Update a ton
of code to compute and propagate alignment information.
As part of this, I've promoted CGBuiltin's EmitPointerWithAlignment
helper function to CGF and made use of it in a number of places in
the expression emitter.
The end result is that we should now be significantly more correct
when performing operations on objects that are locally known to
be under-aligned. Since alignment is not reliably tracked in the
type system, there are inherent limits to this, but at least we
are no longer confused by standard operations like derived-to-base
conversions and array-to-pointer decay. I've also fixed a large
number of bugs where we were applying the complete-object alignment
to a pointer instead of the non-virtual alignment, although most of
these were hidden by the very conservative approach we took with
member alignment.
Also, because IRGen now reliably asserts on zero alignments, we
should no longer be subject to an absurd but frustrating recurring
bug where an incomplete type would report a zero alignment and then
we'd naively do a alignmentAtOffset on it and emit code using an
alignment equal to the largest power-of-two factor of the offset.
We should also now be emitting much more aggressive alignment
attributes in the presence of over-alignment. In particular,
field access now uses alignmentAtOffset instead of min.
Several times in this patch, I had to change the existing
code-generation pattern in order to more effectively use
the Address APIs. For the most part, this seems to be a strict
improvement, like doing pointer arithmetic with GEPs instead of
ptrtoint. That said, I've tried very hard to not change semantics,
but it is likely that I've failed in a few places, for which I
apologize.
ABIArgInfo now always carries the assumed alignment of indirect and
indirect byval arguments. In order to cut down on what was already
a dauntingly large patch, I changed the code to never set align
attributes in the IR on non-byval indirect arguments. That is,
we still generate code which assumes that indirect arguments have
the given alignment, but we don't express this information to the
backend except where it's semantically required (i.e. on byvals).
This is likely a minor regression for those targets that did provide
this information, but it'll be trivial to add it back in a later
patch.
I partially punted on applying this work to CGBuiltin. Please
do not add more uses of the CreateDefaultAligned{Load,Store}
APIs; they will be going away eventually.
llvm-svn: 246985
2015-09-08 16:05:57 +08:00
|
|
|
addr, LVal.getExtVectorElts(), LVal.getType(),
|
2017-10-17 18:17:43 +08:00
|
|
|
LVal.getBaseInfo(), TBAAAccessInfo()));
|
2013-03-08 05:37:17 +08:00
|
|
|
}
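/// Convert an integer value produced by an atomic operation back into an
/// r-value of the expected type. In the easy scalar cases the integer is
/// reused directly or converted in registers (inttoptr for pointers, bitcast
/// for bitcastable types); otherwise it is spilled to a temporary and
/// reloaded via convertAtomicTempToRValue.
///
/// Illustrative example (not from the original source): a native load of an
/// _Atomic(float) produces an i32, which this routine turns back into a
/// float through the bitcast path; an _Atomic(int *) load is converted back
/// with inttoptr.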
RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
                                             AggValueSlot ResultSlot,
                                             SourceLocation Loc,
                                             bool AsValue) const {
  // Try not to go through memory in some easy cases.
  assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
  if (getEvaluationKind() == TEK_Scalar &&
      (((!LVal.isBitField() ||
         LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
        !hasPadding()) ||
       !AsValue)) {
    auto *ValTy = AsValue
                      ? CGF.ConvertTypeForMem(ValueTy)
                      : getAtomicAddress().getType()->getPointerElementType();
    if (ValTy->isIntegerTy()) {
      assert(IntVal->getType() == ValTy && "Different integer types.");
      return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
    } else if (ValTy->isPointerTy())
      return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
    else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
      return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
  }

  // Create a temporary. This needs to be big enough to hold the
  // atomic integer.
  Address Temp = Address::invalid();
  bool TempIsVolatile = false;
  if (AsValue && getEvaluationKind() == TEK_Aggregate) {
    assert(!ResultSlot.isIgnored());
    Temp = ResultSlot.getAddress();
    TempIsVolatile = ResultSlot.isVolatile();
  } else {
    Temp = CreateTempAlloca();
  }

  // Slam the integer into the temporary.
  Address CastTemp = emitCastToAtomicIntPointer(Temp);
  CGF.Builder.CreateStore(IntVal, CastTemp)
      ->setVolatile(TempIsVolatile);

  return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
}

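/// Emit a call to the generic __atomic_load library routine for loads that
/// are not lowered to a native atomic instruction.
///
/// Illustrative sketch (assumed values, not from the original source): for a
/// 16-byte _Atomic struct loaded with sequentially consistent ordering, the
/// emitted call is roughly
///   __atomic_load(16, &obj, &tmp, 5)
/// where 5 is the C ABI encoding of seq_cst produced by llvm::toCABI(AO).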
void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                                       llvm::AtomicOrdering AO, bool) {
  // void __atomic_load(size_t size, void *mem, void *return, int order);
  CallArgList Args;
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddForLoaded)),
           CGF.getContext().VoidPtrTy);
  Args.add(
      RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
      CGF.getContext().IntTy);
  emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
}

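/// Emit a native atomic load: the atomic object is addressed as an integer
/// of the full atomic width and loaded with a 'load atomic' instruction
/// carrying the requested ordering, volatility, and TBAA metadata.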
llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
                                          bool IsVolatile) {
  // Okay, we're doing this natively.
  Address Addr = getAtomicAddressAsAtomicIntPointer();
  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
  Load->setAtomic(AO);

  // Other decoration.
  if (IsVolatile)
    Load->setVolatile(true);
  CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
  return Load;
}

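// Illustrative IR (assumed target and types, not from the original source):
// for a 4-byte _Atomic(int) loaded seq_cst, the sequence above produces
// roughly
//   %atomic-load = load atomic i32, i32* %x seq_cst, align 4
// with the ordering and alignment supplied by the caller and the AtomicInfo.
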
/// An LValue is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* the LValue itself is volatile and
/// such an operation can be performed without a libcall.
bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
  if (!CGM.getCodeGenOpts().MSVolatile) return false;
  AtomicInfo AI(*this, LV);
  bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
  // An atomic is inline if we don't need to use a libcall.
  bool AtomicIsInline = !AI.shouldUseLibcall();
  // MSVC doesn't seem to do this for types wider than a pointer.
  if (getContext().getTypeSize(LV.getType()) >
      getContext().getTypeSize(getContext().getIntPtrType()))
    return false;
  return IsVolatile && AtomicIsInline;
}

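// Illustrative note (assumption based on the checks above, not from the
// original source): under /volatile:ms a plain volatile access such as
//   volatile long Flag;  long V = Flag;
// is emitted by the overload below as an atomic acquire load, provided
// LValueIsSuitableForInlineAtomic accepted the l-value.
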
RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
                                       AggValueSlot Slot) {
  llvm::AtomicOrdering AO;
  bool IsVolatile = LV.isVolatileQualified();
  if (LV.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
    AO = llvm::AtomicOrdering::Acquire;
    IsVolatile = true;
  }
  return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
}

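/// Load the atomic object, either through an __atomic_load libcall or as a
/// native atomic load, and convert the raw result back into an r-value of
/// the requested kind.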
RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                                  bool AsValue, llvm::AtomicOrdering AO,
                                  bool IsVolatile) {
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    Address TempAddr = Address::invalid();
    if (LVal.isSimple() && !ResultSlot.isIgnored()) {
      assert(getEvaluationKind() == TEK_Aggregate);
      TempAddr = ResultSlot.getAddress();
    } else
      TempAddr = CreateTempAlloca();

    EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile);

    // Okay, turn that back into the original value or whole atomic (for
    // non-simple lvalues) type.
    return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
  }

  // Okay, we're doing this natively.
  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);

  // If we're ignoring an aggregate return, don't do anything.
  if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
    return RValue::getAggregate(Address::invalid(), false);

  // Okay, turn that back into the original value or atomic (for non-simple
  // lvalues) type.
  return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
}

/// Emit a load from an l-value of atomic type. Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       llvm::AtomicOrdering AO, bool IsVolatile,
                                       AggValueSlot resultSlot) {
  AtomicInfo Atomics(*this, src);
  return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
                                IsVolatile);
}

/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  assert(LVal.isSimple());
  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding. Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    LValue Dest = CGF.MakeAddrLValue(getAtomicAddress(), getAtomicType());
    LValue Src = CGF.MakeAddrLValue(rvalue.getAggregateAddress(),
                                    getAtomicType());
    bool IsVolatile = rvalue.isVolatileQualified() ||
                      LVal.isVolatileQualified();
    CGF.EmitAggregateCopy(Dest, Src, getAtomicType(), IsVolatile);
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary();

  // Drill past the padding if present.
  LValue TempLVal = projectValue();

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
  }
}

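// Note (editorial assumption, not from the original source): zeroing the
// buffer before projecting the value matters because atomic operations work
// on the full bit-pattern of the atomic object; leaving padding bits
// undefined would make whole-object operations such as compare-and-exchange
// unpredictable.
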
|
|
|
|
|
|
|
|
|
|
|
|
/// Materialize an r-value into memory for the purposes of storing it
|
|
|
|
/// to an atomic type.
|
2015-09-08 16:05:57 +08:00
|
|
|
Address AtomicInfo::materializeRValue(RValue rvalue) const {
|
2013-03-08 05:37:17 +08:00
|
|
|
// Aggregate r-values are already in memory, and EmitAtomicStore
|
|
|
|
// requires them to be values of the atomic type.
|
|
|
|
if (rvalue.isAggregate())
|
2015-09-08 16:05:57 +08:00
|
|
|
return rvalue.getAggregateAddress();
|
2013-03-08 05:37:17 +08:00
|
|
|
|
|
|
|
// Otherwise, make a temporary and materialize into it.
|
2015-09-08 16:05:57 +08:00
|
|
|
LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
|
2015-02-27 14:33:30 +08:00
|
|
|
AtomicInfo Atomics(CGF, TempLV);
|
2015-01-22 14:17:56 +08:00
|
|
|
Atomics.emitCopyIntoMemory(rvalue);
|
2015-02-27 14:33:30 +08:00
|
|
|
return TempLV.getAddress();
|
2013-03-08 05:37:17 +08:00
|
|
|
}
|
|
|
|
|
2014-12-15 13:25:25 +08:00
|
|
|
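/// Convert an r-value of the underlying value type into an integer of the
/// width expected by the atomic instructions, materializing it through
/// memory only when a direct ptrtoint/bitcast is not possible.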
llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
|
|
|
|
// If we've got a scalar value of the right size, try to avoid going
|
|
|
|
// through memory.
|
2015-02-27 14:33:30 +08:00
|
|
|
if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
|
2014-12-15 13:25:25 +08:00
|
|
|
llvm::Value *Value = RVal.getScalarVal();
|
|
|
|
if (isa<llvm::IntegerType>(Value->getType()))
|
2015-03-30 13:20:59 +08:00
|
|
|
return CGF.EmitToMemory(Value, ValueTy);
|
2014-12-15 13:25:25 +08:00
|
|
|
else {
|
2015-02-27 14:33:30 +08:00
|
|
|
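// Non-simple l-values (bit-fields, vector elements) carry the value at the
// full width of the atomic slot rather than at the value type's width.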
llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
|
|
|
|
CGF.getLLVMContext(),
|
|
|
|
LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
|
2014-12-15 13:25:25 +08:00
|
|
|
if (isa<llvm::PointerType>(Value->getType()))
|
|
|
|
return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
|
|
|
|
else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
|
|
|
|
return CGF.Builder.CreateBitCast(Value, InputIntTy);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Otherwise, we need to go through memory.
|
|
|
|
// Put the r-value in memory.
|
2015-09-08 16:05:57 +08:00
|
|
|
Address Addr = materializeRValue(RVal);
|
2014-12-15 13:25:25 +08:00
|
|
|
|
|
|
|
// Cast the temporary to the atomic int type and pull a value out.
|
|
|
|
Addr = emitCastToAtomicIntPointer(Addr);
|
2015-09-08 16:05:57 +08:00
|
|
|
return CGF.Builder.CreateLoad(Addr);
|
2014-12-15 13:25:25 +08:00
|
|
|
}
|
|
|
|
|
2015-05-15 16:36:34 +08:00
|
|
|
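/// Emit a single cmpxchg instruction on the atomic address and return the
/// previously stored value together with the i1 success flag.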
std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
|
|
|
|
llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
|
|
|
|
llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
|
2015-02-27 14:33:30 +08:00
|
|
|
// Do the atomic compare-and-exchange.
|
2015-09-08 16:05:57 +08:00
|
|
|
Address Addr = getAtomicAddressAsAtomicIntPointer();
|
|
|
|
auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr.getPointer(),
|
|
|
|
ExpectedVal, DesiredVal,
|
2015-03-30 13:20:59 +08:00
|
|
|
Success, Failure);
|
2015-02-27 14:33:30 +08:00
|
|
|
// Other decoration.
|
|
|
|
Inst->setVolatile(LVal.isVolatileQualified());
|
|
|
|
Inst->setWeak(IsWeak);
|
|
|
|
|
|
|
|
// Okay, pull the previous value and the success flag out of the result pair.
|
|
|
|
auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
|
|
|
|
auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
|
2015-05-15 16:36:34 +08:00
|
|
|
return std::make_pair(PreviousVal, SuccessFailureVal);
|
2015-02-27 14:33:30 +08:00
|
|
|
}
|
|
|
|
|
2015-05-15 16:36:34 +08:00
|
|
|
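/// Emit a call to the generic __atomic_compare_exchange runtime function;
/// the expected and desired values are passed indirectly through memory.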
llvm::Value *
|
|
|
|
AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
|
|
|
|
llvm::Value *DesiredAddr,
|
2015-02-27 14:33:30 +08:00
|
|
|
llvm::AtomicOrdering Success,
|
|
|
|
llvm::AtomicOrdering Failure) {
|
|
|
|
// bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
|
|
|
|
// void *desired, int success, int failure);
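// Note: this path is only reached when shouldUseLibcall() decides the target
// cannot perform the compare-exchange inline, e.g. for over-sized or
// under-aligned atomic types.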
|
|
|
|
CallArgList Args;
|
|
|
|
Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
|
2015-09-08 16:05:57 +08:00
|
|
|
Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
|
2015-02-27 14:33:30 +08:00
|
|
|
CGF.getContext().VoidPtrTy);
|
|
|
|
Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)),
|
|
|
|
CGF.getContext().VoidPtrTy);
|
|
|
|
Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)),
|
|
|
|
CGF.getContext().VoidPtrTy);
|
2016-04-19 02:01:49 +08:00
|
|
|
Args.add(RValue::get(
|
|
|
|
llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
|
2015-02-27 14:33:30 +08:00
|
|
|
CGF.getContext().IntTy);
|
2016-04-19 02:01:49 +08:00
|
|
|
Args.add(RValue::get(
|
|
|
|
llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),
|
2015-02-27 14:33:30 +08:00
|
|
|
CGF.getContext().IntTy);
|
|
|
|
auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
|
|
|
|
CGF.getContext().BoolTy, Args);
|
2015-03-30 13:20:59 +08:00
|
|
|
|
2015-05-15 16:36:34 +08:00
|
|
|
return SuccessFailureRVal.getScalarVal();
|
2015-02-27 14:33:30 +08:00
|
|
|
}
|
|
|
|
|
2015-03-30 13:20:59 +08:00
|
|
|
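/// Emit a compare-and-exchange of the whole atomic value, using either the
/// __atomic_compare_exchange libcall or an inline cmpxchg depending on what
/// the target supports. Returns the old value and the success flag.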
std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
|
2015-02-27 14:33:30 +08:00
|
|
|
RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
|
|
|
|
llvm::AtomicOrdering Failure, bool IsWeak) {
|
2016-04-07 07:37:36 +08:00
|
|
|
if (isStrongerThan(Failure, Success))
|
|
|
|
// Don't assert on the undefined behavior "the failure argument shall be no
|
|
|
|
// stronger than the success argument".
|
2015-02-27 14:33:30 +08:00
|
|
|
Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
|
|
|
|
|
|
|
|
// Check whether we should use a library call.
|
|
|
|
if (shouldUseLibcall()) {
|
|
|
|
// Produce a source address.
|
2015-09-08 16:05:57 +08:00
|
|
|
Address ExpectedAddr = materializeRValue(Expected);
|
|
|
|
Address DesiredAddr = materializeRValue(Desired);
|
|
|
|
auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
|
|
|
|
DesiredAddr.getPointer(),
|
2015-05-15 16:36:34 +08:00
|
|
|
Success, Failure);
|
|
|
|
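// On failure the libcall stores the observed value back into ExpectedAddr, so
// the temporary always holds the old value; re-read it for the first result.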
return std::make_pair(
|
2015-09-08 16:05:57 +08:00
|
|
|
convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
|
|
|
|
SourceLocation(), /*AsValue=*/false),
|
2015-05-15 16:36:34 +08:00
|
|
|
Res);
|
2015-02-27 14:33:30 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// If we've got a scalar value of the right size, try to avoid going
|
|
|
|
// through memory.
|
2015-05-15 16:36:34 +08:00
|
|
|
auto *ExpectedVal = convertRValueToInt(Expected);
|
|
|
|
auto *DesiredVal = convertRValueToInt(Desired);
|
|
|
|
auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
|
|
|
|
Failure, IsWeak);
|
|
|
|
return std::make_pair(
|
|
|
|
ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
|
|
|
|
SourceLocation(), /*AsValue=*/false),
|
|
|
|
Res.second);
|
|
|
|
}
|
|
|
|
|
|
|
|
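/// Apply UpdateOp to the previously loaded value and store the result into
/// DesiredAddr, re-projecting through bit-field, vector-element, or
/// ext-vector-element l-values when the atomic access is not a simple one.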
static void
|
|
|
|
EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
|
|
|
|
const llvm::function_ref<RValue(RValue)> &UpdateOp,
|
2015-09-08 16:05:57 +08:00
|
|
|
Address DesiredAddr) {
|
2015-05-15 16:36:34 +08:00
|
|
|
RValue UpRVal;
|
|
|
|
LValue AtomicLVal = Atomics.getAtomicLValue();
|
|
|
|
LValue DesiredLVal;
|
|
|
|
if (AtomicLVal.isSimple()) {
|
|
|
|
UpRVal = OldRVal;
|
2015-09-08 16:05:57 +08:00
|
|
|
DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
|
2015-05-15 16:36:34 +08:00
|
|
|
} else {
|
|
|
|
// Build new lvalue for temp address
|
2015-09-08 16:05:57 +08:00
|
|
|
Address Ptr = Atomics.materializeRValue(OldRVal);
|
|
|
|
LValue UpdateLVal;
|
2015-05-15 16:36:34 +08:00
|
|
|
if (AtomicLVal.isBitField()) {
|
|
|
|
UpdateLVal =
|
|
|
|
LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
|
2015-09-08 16:05:57 +08:00
|
|
|
AtomicLVal.getType(),
|
2017-10-17 18:17:43 +08:00
|
|
|
AtomicLVal.getBaseInfo(),
|
|
|
|
AtomicLVal.getTBAAInfo());
|
2015-05-15 16:36:34 +08:00
|
|
|
DesiredLVal =
|
|
|
|
LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
|
2017-10-17 18:17:43 +08:00
|
|
|
AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
|
|
|
|
AtomicLVal.getTBAAInfo());
|
2015-05-15 16:36:34 +08:00
|
|
|
} else if (AtomicLVal.isVectorElt()) {
|
|
|
|
UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
|
|
|
|
AtomicLVal.getType(),
|
2017-10-17 18:17:43 +08:00
|
|
|
AtomicLVal.getBaseInfo(),
|
|
|
|
AtomicLVal.getTBAAInfo());
|
2015-05-15 16:36:34 +08:00
|
|
|
DesiredLVal = LValue::MakeVectorElt(
|
|
|
|
DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
|
2017-10-17 18:17:43 +08:00
|
|
|
AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
|
2015-05-15 16:36:34 +08:00
|
|
|
} else {
|
|
|
|
assert(AtomicLVal.isExtVectorElt());
|
|
|
|
UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
|
|
|
|
AtomicLVal.getType(),
|
2017-10-17 18:17:43 +08:00
|
|
|
AtomicLVal.getBaseInfo(),
|
|
|
|
AtomicLVal.getTBAAInfo());
|
2015-05-15 16:36:34 +08:00
|
|
|
DesiredLVal = LValue::MakeExtVectorElt(
|
|
|
|
DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
|
2017-10-17 18:17:43 +08:00
|
|
|
AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
|
2015-05-15 16:36:34 +08:00
|
|
|
}
|
|
|
|
UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
|
|
|
|
}
|
|
|
|
// Store new value in the corresponding memory area
|
|
|
|
RValue NewRVal = UpdateOp(UpRVal);
|
|
|
|
if (NewRVal.isScalar()) {
|
|
|
|
CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
|
|
|
|
} else {
|
|
|
|
assert(NewRVal.isComplex());
|
|
|
|
CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
|
|
|
|
/*isInit=*/false);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
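/// Perform an atomic read-modify-write via the libcall path: load the old
/// value, compute the new value with UpdateOp, and retry with
/// __atomic_compare_exchange until the update is published.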
void AtomicInfo::EmitAtomicUpdateLibcall(
|
|
|
|
llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
|
|
|
|
bool IsVolatile) {
|
|
|
|
auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
|
|
|
|
|
2015-09-08 16:05:57 +08:00
|
|
|
Address ExpectedAddr = CreateTempAlloca();
|
2015-05-15 16:36:34 +08:00
|
|
|
|
2015-09-08 16:05:57 +08:00
|
|
|
EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
|
2015-05-15 16:36:34 +08:00
|
|
|
auto *ContBB = CGF.createBasicBlock("atomic_cont");
|
|
|
|
auto *ExitBB = CGF.createBasicBlock("atomic_exit");
|
|
|
|
CGF.EmitBlock(ContBB);
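// Each iteration of this loop recomputes the desired value from the current
// contents of ExpectedAddr and retries the compare-exchange.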
|
2015-09-08 16:05:57 +08:00
|
|
|
Address DesiredAddr = CreateTempAlloca();
|
2015-05-15 16:36:34 +08:00
|
|
|
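// Preserve the existing bytes of the slot (padding, or the bits around a
// bit-field) by seeding the desired buffer with the old value.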
if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
|
2015-09-08 16:05:57 +08:00
|
|
|
requiresMemSetZero(getAtomicAddress().getElementType())) {
|
|
|
|
auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
|
|
|
|
CGF.Builder.CreateStore(OldVal, DesiredAddr);
|
2015-05-15 16:36:34 +08:00
|
|
|
}
|
2015-09-08 16:05:57 +08:00
|
|
|
  auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
                                           AggValueSlot::ignored(),
                                           SourceLocation(), /*AsValue=*/false);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                       DesiredAddr.getPointer(),
                                       AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
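
// The EmitAtomicUpdateOp overloads below emit a compare-and-swap retry loop.
// As a rough, illustrative sketch (not the exact IR produced), for a 32-bit
// atomic it has this shape:
//
//     %old = <initial atomic load>
//     br label %atomic_cont
//   atomic_cont:
//     %cur = phi i32 [ %old, %entry ], [ %observed, %atomic_cont ]
//     %desired = <update applied to %cur>
//     %pair = cmpxchg i32* %addr, i32 %cur, i32 %desired <order> <failure order>
//     %observed = extractvalue { i32, i1 } %pair, 0
//     %ok = extractvalue { i32, i1 } %pair, 1
//     br i1 %ok, label %atomic_exit, label %atomic_cont
//   atomic_exit: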
void AtomicInfo::EmitAtomicUpdateOp(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
  // For non-simple lvalues perform compare-and-swap procedure.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
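  // On each failed cmpxchg the PHI picks up the value the hardware actually
  // observed, so the update is re-applied to fresh data on every retry.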
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
  }
  auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
                                           SourceLocation(), /*AsValue=*/false);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
  // Try to write the new value using a cmpxchg operation.
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
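
// Store a pre-computed scalar update value into the projected region of the
// temporary (bit-field, vector element, or extended vector element) that will
// be handed to the compare-exchange.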
static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
                                  RValue UpdateRVal, Address DesiredAddr) {
  LValue AtomicLVal = Atomics.getAtomicLValue();
  LValue DesiredLVal;
  // Build a new l-value for the temporary address.
  if (AtomicLVal.isBitField()) {
    DesiredLVal =
        LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
                             AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
                             AtomicLVal.getTBAAInfo());
  } else if (AtomicLVal.isVectorElt()) {
    DesiredLVal =
        LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
                              AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
                              AtomicLVal.getTBAAInfo());
  } else {
    assert(AtomicLVal.isExtVectorElt());
    DesiredLVal = LValue::MakeExtVectorElt(
        DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
        AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
  }
  // Store the new value into the corresponding memory area.
  assert(UpdateRVal.isScalar());
  CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
}
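
// Libcall-based form of the update: materialize 'expected' and 'desired'
// temporaries and loop on the __atomic_compare_exchange libcall until it
// succeeds.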
void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                                         RValue UpdateRVal, bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
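  // ExpectedAddr holds the value observed by the initial atomic load; each
  // failed __atomic_compare_exchange rewrites it with the value actually seen,
  // so every iteration of the loop updates fresh data.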
  Address DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                       DesiredAddr.getPointer(),
                                       AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
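
// Native (instruction-based) form of the same update, using a cmpxchg retry
// loop instead of the __atomic_* runtime calls.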
void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
  // For non-simple lvalues perform compare-and-swap procedure.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
  // Try to write the new value using a cmpxchg operation.
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
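
// Entry points for the two update forms above: dispatch to the libcall or
// native path depending on shouldUseLibcall().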
void AtomicInfo::EmitAtomicUpdate(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
  }
}

void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                  bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
  }
}
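
// Convenience overload: a true _Atomic l-value gets a sequentially-consistent
// store; any other l-value routed through here still needs an atomic store,
// so it is emitted with release ordering and treated as a volatile access.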
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
                                      bool isInit) {
  bool IsVolatile = lvalue.isVolatileQualified();
  llvm::AtomicOrdering AO;
  if (lvalue.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
    AO = llvm::AtomicOrdering::Release;
    IsVolatile = true;
  }
  return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
}

/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
                                      llvm::AtomicOrdering AO, bool IsVolatile,
                                      bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddress().getElementType()
           == dest.getAddress().getElementType());

  AtomicInfo atomics(*this, dest);
  LValue LVal = atomics.getAtomicLValue();

  // If this is an initialization, just put the value there normally.
  if (LVal.isSimple()) {
    if (isInit) {
      atomics.emitCopyIntoMemory(rvalue);
      return;
    }

    // Check whether we should use a library call.
    if (atomics.shouldUseLibcall()) {
      // Produce a source address.
      Address srcAddr = atomics.materializeRValue(rvalue);

      // void __atomic_store(size_t size, void *mem, void *val, int order)
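      // As a rough illustration (exact value names and casts come from the
      // CallArgList built below), a seq_cst store of an over-sized _Atomic
      // object lowers to something like:
      //   call void @__atomic_store(i64 <size>, i8* <obj>, i8* <temp>, i32 5)
      // where 5 is the C ABI encoding of memory_order_seq_cst.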
      CallArgList args;
      args.add(RValue::get(atomics.getAtomicSizeValue()),
               getContext().getSizeType());
      args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicPointer())),
               getContext().VoidPtrTy);
      args.add(RValue::get(EmitCastToVoidPtr(srcAddr.getPointer())),
               getContext().VoidPtrTy);
      args.add(
          RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
          getContext().IntTy);
|
2015-02-27 14:33:30 +08:00
|
|
|
emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Okay, we're doing this natively.
    llvm::Value *intValue = atomics.convertRValueToInt(rvalue);

    // Do the atomic store.
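    // Illustrative lowering, assuming a lock-free type such as `float`: the
    // value is reinterpreted as an integer of the atomic width and stored with
    // something like `store atomic i32 %v.bits, i32* %addr <order>, align 4`.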
    Address addr =
        atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
    intValue = Builder.CreateIntCast(
        intValue, addr.getElementType(), /*isSigned=*/false);
    llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

    // Initializations don't need to be atomic.
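    // (For example, `_Atomic(int) x = 5;` reaches here with isInit == true and
    // emits a plain, non-atomic store: no other thread can legitimately observe
    // the object while it is still being initialized.)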
    if (!isInit)
      store->setAtomic(AO);

    // Other decoration.
    if (IsVolatile)
      store->setVolatile(true);
    CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
    return;
  }

  // Emit simple atomic update operation.
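  // (Reached when the l-value is not a simple address, e.g. an atomic bitfield
  // or vector-element access, so the store is performed as an atomic
  // read-modify-write of the enclosing atomic-sized unit.)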
  atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
}

/// Emit a compare-and-exchange op for atomic type.
///
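/// The returned pair is, roughly, the value previously held by \p Obj together
/// with a flag indicating whether the exchange succeeded; see
/// AtomicInfo::EmitAtomicCompareExchange for the exact contract.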
std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
    LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
    AggValueSlot Slot) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!Expected.isAggregate() ||
         Expected.getAggregateAddress().getElementType() ==
             Obj.getAddress().getElementType());
  assert(!Desired.isAggregate() ||
         Desired.getAggregateAddress().getElementType() ==
             Obj.getAddress().getElementType());
  AtomicInfo Atomics(*this, Obj);

  return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
                                           IsWeak);
}

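/// Perform an atomic read-modify-write on \p LVal: \p UpdateOp is handed the
/// current value as an RValue and returns the value to store back. The update
/// itself is carried out atomically, e.g. via a compare-and-exchange loop or
/// an atomic libcall, depending on the type. A caller typically passes a small
/// callback that computes the new value from the old one.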
void CodeGenFunction::EmitAtomicUpdate(
    LValue LVal, llvm::AtomicOrdering AO,
    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
  AtomicInfo Atomics(*this, LVal);
  Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
}

void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value));
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value));
    return;
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
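    // For example, with `_Atomic(struct S) x = (struct S){...}` the initializer
    // has type `struct S`, so any padding the atomic representation adds is
    // zeroed first and the value is then emitted into the projected value
    // sub-object (illustrative; the helpers below do the real work).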
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary();
      dest = atomics.projectValue();
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(dest,
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        Zeroed ? AggValueSlot::IsZeroed :
                                                 AggValueSlot::IsNotZeroed);

    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}