Move "atomic" and "volatile" designations on instructions after the opcode

of the instruction.

Note that this change affects the existing non-atomic load and store
instructions; the parser now accepts both forms, and the change is noted
in the release notes.

llvm-svn: 137527
Eli Friedman 2011-08-12 22:50:01 +00:00
parent 59d741fec6
commit 02e737b08e
25 changed files with 166 additions and 101 deletions
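
For illustration, the two spellings side by side (operand names here are illustrative; the exact forms appear in the updated tests below). The prefix spelling remains accepted for compatibility but is now deprecated:

  %v = volatile load i32* %p, align 4        ; old placement (deprecated)
  volatile store i32 %v, i32* %p, align 4    ; old placement (deprecated)
  %v = load volatile i32* %p, align 4        ; new canonical placement
  store volatile i32 %v, i32* %p, align 4    ; new canonical placement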


@@ -4572,8 +4572,8 @@ that the invoke/unwind semantics are likely to change in future versions.</p>
 <h5>Syntax:</h5>
 <pre>
-  &lt;result&gt; = [volatile] load &lt;ty&gt;* &lt;pointer&gt;[, align &lt;alignment&gt;][, !nontemporal !&lt;index&gt;]
-  &lt;result&gt; = atomic [volatile] load &lt;ty&gt;* &lt;pointer&gt; [singlethread] &lt;ordering&gt;, align &lt;alignment&gt;
+  &lt;result&gt; = load [volatile] &lt;ty&gt;* &lt;pointer&gt;[, align &lt;alignment&gt;][, !nontemporal !&lt;index&gt;]
+  &lt;result&gt; = load atomic [volatile] &lt;ty&gt;* &lt;pointer&gt; [singlethread] &lt;ordering&gt;, align &lt;alignment&gt;
   !&lt;index&gt; = !{ i32 1 }
 </pre>
@@ -4644,8 +4644,8 @@ that the invoke/unwind semantics are likely to change in future versions.</p>
 <h5>Syntax:</h5>
 <pre>
-  [volatile] store &lt;ty&gt; &lt;value&gt;, &lt;ty&gt;* &lt;pointer&gt;[, align &lt;alignment&gt;][, !nontemporal !&lt;index&gt;] <i>; yields {void}</i>
-  atomic [volatile] store &lt;ty&gt; &lt;value&gt;, &lt;ty&gt;* &lt;pointer&gt; [singlethread] &lt;ordering&gt;, align &lt;alignment&gt; <i>; yields {void}</i>
+  store [volatile] &lt;ty&gt; &lt;value&gt;, &lt;ty&gt;* &lt;pointer&gt;[, align &lt;alignment&gt;][, !nontemporal !&lt;index&gt;] <i>; yields {void}</i>
+  store atomic [volatile] &lt;ty&gt; &lt;value&gt;, &lt;ty&gt;* &lt;pointer&gt; [singlethread] &lt;ordering&gt;, align &lt;alignment&gt; <i>; yields {void}</i>
 </pre>
 <h5>Overview:</h5>
@@ -4774,7 +4774,7 @@ thread. (This is useful for interacting with signal handlers.)</p>
 <h5>Syntax:</h5>
 <pre>
-  [volatile] cmpxchg &lt;ty&gt;* &lt;pointer&gt;, &lt;ty&gt; &lt;cmp&gt;, &lt;ty&gt; &lt;new&gt; [singlethread] &lt;ordering&gt; <i>; yields {ty}</i>
+  cmpxchg [volatile] &lt;ty&gt;* &lt;pointer&gt;, &lt;ty&gt; &lt;cmp&gt;, &lt;ty&gt; &lt;new&gt; [singlethread] &lt;ordering&gt; <i>; yields {ty}</i>
 </pre>
 <h5>Overview:</h5>
@@ -4857,7 +4857,7 @@ done:
 <h5>Syntax:</h5>
 <pre>
-  [volatile] atomicrmw &lt;operation&gt; &lt;ty&gt;* &lt;pointer&gt;, &lt;ty&gt; &lt;value&gt; [singlethread] &lt;ordering&gt; <i>; yields {ty}</i>
+  atomicrmw [volatile] &lt;operation&gt; &lt;ty&gt;* &lt;pointer&gt;, &lt;ty&gt; &lt;value&gt; [singlethread] &lt;ordering&gt; <i>; yields {ty}</i>
 </pre>
 <h5>Overview:</h5>


@@ -583,6 +583,10 @@ it run faster:</p>
 <ul>
   <li>The <code>LowerSetJmp</code> pass wasn't used effectively by any
       target and has been removed.</li>
+  <li>The syntax of volatile loads and stores in IR has been changed to
+      "<code>load volatile</code>"/"<code>store volatile</code>". The old
+      syntax ("<code>volatile load</code>"/"<code>volatile store</code>")
+      is still accepted, but is now considered deprecated.</li>
 </ul>
 </div>


@@ -2950,27 +2950,17 @@ int LLParser::ParseInstruction(Instruction *&Inst, BasicBlock *BB,
   case lltok::kw_tail: return ParseCall(Inst, PFS, true);
   // Memory.
   case lltok::kw_alloca: return ParseAlloc(Inst, PFS);
-  case lltok::kw_load: return ParseLoad(Inst, PFS, false, false);
-  case lltok::kw_store: return ParseStore(Inst, PFS, false, false);
-  case lltok::kw_cmpxchg: return ParseCmpXchg(Inst, PFS, false);
-  case lltok::kw_atomicrmw: return ParseAtomicRMW(Inst, PFS, false);
+  case lltok::kw_load: return ParseLoad(Inst, PFS, false);
+  case lltok::kw_store: return ParseStore(Inst, PFS, false);
+  case lltok::kw_cmpxchg: return ParseCmpXchg(Inst, PFS);
+  case lltok::kw_atomicrmw: return ParseAtomicRMW(Inst, PFS);
   case lltok::kw_fence: return ParseFence(Inst, PFS);
-  case lltok::kw_atomic: {
-    bool isVolatile = EatIfPresent(lltok::kw_volatile);
-    if (EatIfPresent(lltok::kw_load))
-      return ParseLoad(Inst, PFS, true, isVolatile);
-    else if (EatIfPresent(lltok::kw_store))
-      return ParseStore(Inst, PFS, true, isVolatile);
-  }
   case lltok::kw_volatile:
+    // For compatibility; canonical location is after load
     if (EatIfPresent(lltok::kw_load))
-      return ParseLoad(Inst, PFS, false, true);
+      return ParseLoad(Inst, PFS, true);
     else if (EatIfPresent(lltok::kw_store))
-      return ParseStore(Inst, PFS, false, true);
-    else if (EatIfPresent(lltok::kw_cmpxchg))
-      return ParseCmpXchg(Inst, PFS, true);
-    else if (EatIfPresent(lltok::kw_atomicrmw))
-      return ParseAtomicRMW(Inst, PFS, true);
+      return ParseStore(Inst, PFS, true);
     else
       return TokError("expected 'load' or 'store'");
   case lltok::kw_getelementptr: return ParseGetElementPtr(Inst, PFS);
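
Note the compatibility fallout in the dispatch above: the prefix spelling survives only for load and store. The removed 'atomic' case means the old 'atomic load'/'atomic store' prefix forms are gone entirely, and the removed cmpxchg/atomicrmw branches mean their prefix forms now hit the TokError. Illustrative inputs:

  volatile load i32* %p                             ; still parses (compatibility path)
  volatile store i32 0, i32* %p                     ; still parses (compatibility path)
  volatile cmpxchg i32* %p, i32 0, i32 1 monotonic  ; now rejected: expected 'load' or 'store'
  cmpxchg volatile i32* %p, i32 0, i32 1 monotonic  ; new canonical spelling
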
@@ -3694,16 +3684,34 @@ int LLParser::ParseAlloc(Instruction *&Inst, PerFunctionState &PFS) {
 }
 /// ParseLoad
-/// ::= 'volatile'? 'load' TypeAndValue (',' 'align' i32)?
-//  ::= 'atomic' 'volatile'? 'load' TypeAndValue
-//      'singlethread'? AtomicOrdering (',' 'align' i32)?
+/// ::= 'load' 'volatile'? TypeAndValue (',' 'align' i32)?
+/// ::= 'load' 'atomic' 'volatile'? TypeAndValue
+///     'singlethread'? AtomicOrdering (',' 'align' i32)?
+/// Compatibility:
+/// ::= 'volatile' 'load' TypeAndValue (',' 'align' i32)?
 int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS,
-                        bool isAtomic, bool isVolatile) {
+                        bool isVolatile) {
   Value *Val; LocTy Loc;
   unsigned Alignment = 0;
   bool AteExtraComma = false;
+  bool isAtomic = false;
   AtomicOrdering Ordering = NotAtomic;
   SynchronizationScope Scope = CrossThread;
+  if (Lex.getKind() == lltok::kw_atomic) {
+    if (isVolatile)
+      return TokError("mixing atomic with old volatile placement");
+    isAtomic = true;
+    Lex.Lex();
+  }
+  if (Lex.getKind() == lltok::kw_volatile) {
+    if (isVolatile)
+      return TokError("duplicate volatile before and after store");
+    isVolatile = true;
+    Lex.Lex();
+  }
   if (ParseTypeAndValue(Val, Loc, PFS) ||
       ParseScopeAndOrdering(isAtomic, Scope, Ordering) ||
       ParseOptionalCommaAlign(Alignment, AteExtraComma))
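
The two new guards above fire only when ParseLoad is entered through the deprecated prefix path (isVolatile already true); the same pair appears in ParseStore below, whose wording the duplicate-volatile message shares. Hypothetical inputs that would trip them:

  volatile load atomic i32* %p unordered, align 4   ; error: mixing atomic with old volatile placement
  volatile load volatile i32* %p                    ; error: duplicate volatile before and after store
  load atomic volatile i32* %p unordered, align 4   ; accepted: atomic, then volatile, after the opcode
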
@@ -3722,16 +3730,35 @@ int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS,
 }
 /// ParseStore
-/// ::= 'volatile'? 'store' TypeAndValue ',' TypeAndValue (',' 'align' i32)?
-/// ::= 'atomic' 'volatile'? 'store' TypeAndValue ',' TypeAndValue
+/// ::= 'store' 'volatile'? TypeAndValue ',' TypeAndValue (',' 'align' i32)?
+/// ::= 'store' 'atomic' 'volatile'? TypeAndValue ',' TypeAndValue
 ///     'singlethread'? AtomicOrdering (',' 'align' i32)?
+/// Compatibility:
+/// ::= 'volatile' 'store' TypeAndValue ',' TypeAndValue (',' 'align' i32)?
 int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS,
-                         bool isAtomic, bool isVolatile) {
+                         bool isVolatile) {
   Value *Val, *Ptr; LocTy Loc, PtrLoc;
   unsigned Alignment = 0;
   bool AteExtraComma = false;
+  bool isAtomic = false;
   AtomicOrdering Ordering = NotAtomic;
   SynchronizationScope Scope = CrossThread;
+  if (Lex.getKind() == lltok::kw_atomic) {
+    if (isVolatile)
+      return TokError("mixing atomic with old volatile placement");
+    isAtomic = true;
+    Lex.Lex();
+  }
+  if (Lex.getKind() == lltok::kw_volatile) {
+    if (isVolatile)
+      return TokError("duplicate volatile before and after store");
+    isVolatile = true;
+    Lex.Lex();
+  }
   if (ParseTypeAndValue(Val, Loc, PFS) ||
       ParseToken(lltok::comma, "expected ',' after store operand") ||
       ParseTypeAndValue(Ptr, PtrLoc, PFS) ||
@@ -3755,14 +3782,18 @@ int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS,
 }
 /// ParseCmpXchg
-/// ::= 'volatile'? 'cmpxchg' TypeAndValue ',' TypeAndValue ',' TypeAndValue
-///     'singlethread'? AtomicOrdering
-int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS,
-                           bool isVolatile) {
+/// ::= 'cmpxchg' 'volatile'? TypeAndValue ',' TypeAndValue ',' TypeAndValue
+///     'singlethread'? AtomicOrdering
+int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS) {
   Value *Ptr, *Cmp, *New; LocTy PtrLoc, CmpLoc, NewLoc;
   bool AteExtraComma = false;
   AtomicOrdering Ordering = NotAtomic;
   SynchronizationScope Scope = CrossThread;
+  bool isVolatile = false;
+  if (EatIfPresent(lltok::kw_volatile))
+    isVolatile = true;
   if (ParseTypeAndValue(Ptr, PtrLoc, PFS) ||
       ParseToken(lltok::comma, "expected ',' after cmpxchg address") ||
       ParseTypeAndValue(Cmp, CmpLoc, PFS) ||
@@ -3794,15 +3825,19 @@ int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS,
 }
 /// ParseAtomicRMW
-/// ::= 'volatile'? 'atomicrmw' BinOp TypeAndValue ',' TypeAndValue
-///     'singlethread'? AtomicOrdering
-int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS,
-                             bool isVolatile) {
+/// ::= 'atomicrmw' 'volatile'? BinOp TypeAndValue ',' TypeAndValue
+///     'singlethread'? AtomicOrdering
+int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) {
   Value *Ptr, *Val; LocTy PtrLoc, ValLoc;
   bool AteExtraComma = false;
   AtomicOrdering Ordering = NotAtomic;
   SynchronizationScope Scope = CrossThread;
+  bool isVolatile = false;
   AtomicRMWInst::BinOp Operation;
+  if (EatIfPresent(lltok::kw_volatile))
+    isVolatile = true;
   switch (Lex.getKind()) {
   default: return TokError("expected binary operation in atomicrmw");
   case lltok::kw_xchg: Operation = AtomicRMWInst::Xchg; break;


@@ -363,12 +363,10 @@ namespace llvm {
     bool ParseLandingPad(Instruction *&I, PerFunctionState &PFS);
     bool ParseCall(Instruction *&I, PerFunctionState &PFS, bool isTail);
     int ParseAlloc(Instruction *&I, PerFunctionState &PFS);
-    int ParseLoad(Instruction *&I, PerFunctionState &PFS,
-                  bool isAtomic, bool isVolatile);
-    int ParseStore(Instruction *&I, PerFunctionState &PFS,
-                   bool isAtomic, bool isVolatile);
-    int ParseCmpXchg(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
-    int ParseAtomicRMW(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
+    int ParseLoad(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
+    int ParseStore(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
+    int ParseCmpXchg(Instruction *&I, PerFunctionState &PFS);
+    int ParseAtomicRMW(Instruction *&I, PerFunctionState &PFS);
     int ParseFence(Instruction *&I, PerFunctionState &PFS);
     int ParseGetElementPtr(Instruction *&I, PerFunctionState &PFS);
     int ParseExtractValue(Instruction *&I, PerFunctionState &PFS);


@@ -1658,16 +1658,6 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
     else
       Out << '%' << SlotNum << " = ";
   }
-  // If this is an atomic load or store, print out the atomic marker.
-  if ((isa<LoadInst>(I) && cast<LoadInst>(I).isAtomic()) ||
-      (isa<StoreInst>(I) && cast<StoreInst>(I).isAtomic()))
-    Out << "atomic ";
-  // If this is a volatile load or store, print out the volatile marker.
-  if ((isa<LoadInst>(I) && cast<LoadInst>(I).isVolatile()) ||
-      (isa<StoreInst>(I) && cast<StoreInst>(I).isVolatile()))
-    Out << "volatile ";
   if (isa<CallInst>(I) && cast<CallInst>(I).isTailCall())
     Out << "tail ";
@@ -1675,6 +1665,18 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
   // Print out the opcode...
   Out << I.getOpcodeName();
+  // If this is an atomic load or store, print out the atomic marker.
+  if ((isa<LoadInst>(I) && cast<LoadInst>(I).isAtomic()) ||
+      (isa<StoreInst>(I) && cast<StoreInst>(I).isAtomic()))
+    Out << " atomic";
+  // If this is a volatile operation, print out the volatile marker.
+  if ((isa<LoadInst>(I) && cast<LoadInst>(I).isVolatile()) ||
+      (isa<StoreInst>(I) && cast<StoreInst>(I).isVolatile()) ||
+      (isa<AtomicCmpXchgInst>(I) && cast<AtomicCmpXchgInst>(I).isVolatile()) ||
+      (isa<AtomicRMWInst>(I) && cast<AtomicRMWInst>(I).isVolatile()))
+    Out << " volatile";
   // Print out optimization information.
   WriteOptimizationInfo(Out, &I);


@@ -0,0 +1,26 @@
+; RUN: opt -S < %s | FileCheck %s
+; Basic smoke test for atomic operations.
+define void @f(i32* %x) {
+  ; CHECK: load atomic i32* %x unordered, align 4
+  load atomic i32* %x unordered, align 4
+  ; CHECK: load atomic volatile i32* %x singlethread acquire, align 4
+  load atomic volatile i32* %x singlethread acquire, align 4
+  ; CHECK: store atomic i32 3, i32* %x release, align 4
+  store atomic i32 3, i32* %x release, align 4
+  ; CHECK: store atomic volatile i32 3, i32* %x singlethread monotonic, align 4
+  store atomic volatile i32 3, i32* %x singlethread monotonic, align 4
+  ; CHECK: cmpxchg i32* %x, i32 1, i32 0 singlethread monotonic
+  cmpxchg i32* %x, i32 1, i32 0 singlethread monotonic
+  ; CHECK: cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel
+  cmpxchg volatile i32* %x, i32 0, i32 1 acq_rel
+  ; CHECK: atomicrmw add i32* %x, i32 10 seq_cst
+  atomicrmw add i32* %x, i32 10 seq_cst
+  ; CHECK: atomicrmw volatile xchg i32* %x, i32 10 monotonic
+  atomicrmw volatile xchg i32* %x, i32 10 monotonic
+  ; CHECK: fence singlethread release
+  fence singlethread release
+  ; CHECK: fence seq_cst
+  fence seq_cst
+  ret void
+}


@@ -31,7 +31,7 @@ define void @h() {
 entry:
   %i = alloca i32, align 4
   volatile store i32 10, i32* %i, align 4
-; CHECK: %tmp = volatile load i32* %i, align 4
+; CHECK: %tmp = load volatile i32* %i, align 4
 ; CHECK-next: call void @f(i32 undef)
   %tmp = volatile load i32* %i, align 4
   call void @f(i32 %tmp)


@@ -42,20 +42,20 @@ define i32 @test3(i32* %g_addr) nounwind {
 define void @test4(i32* %Q) {
   %a = load i32* %Q
-  volatile store i32 %a, i32* %Q
+  store volatile i32 %a, i32* %Q
   ret void
 ; CHECK: @test4
 ; CHECK-NEXT: load i32
-; CHECK-NEXT: volatile store
+; CHECK-NEXT: store volatile
 ; CHECK-NEXT: ret void
 }
 define void @test5(i32* %Q) {
-  %a = volatile load i32* %Q
+  %a = load volatile i32* %Q
   store i32 %a, i32* %Q
   ret void
 ; CHECK: @test5
-; CHECK-NEXT: volatile load
+; CHECK-NEXT: load volatile
 ; CHECK-NEXT: ret void
 }


@@ -13,21 +13,21 @@ define void @test1(i8 %V, i32 *%P) {
   volatile store i32 %C, i32* %P
   volatile store i32 %D, i32* %P
 ; CHECK-NEXT: %C = zext i8 %V to i32
-; CHECK-NEXT: volatile store i32 %C
-; CHECK-NEXT: volatile store i32 %C
+; CHECK-NEXT: store volatile i32 %C
+; CHECK-NEXT: store volatile i32 %C
   %E = add i32 %C, %C
   %F = add i32 %C, %C
   volatile store i32 %E, i32* %P
   volatile store i32 %F, i32* %P
 ; CHECK-NEXT: %E = add i32 %C, %C
-; CHECK-NEXT: volatile store i32 %E
-; CHECK-NEXT: volatile store i32 %E
+; CHECK-NEXT: store volatile i32 %E
+; CHECK-NEXT: store volatile i32 %E
   %G = add nuw i32 %C, %C ;; not a CSE with E
   volatile store i32 %G, i32* %P
 ; CHECK-NEXT: %G = add nuw i32 %C, %C
-; CHECK-NEXT: volatile store i32 %G
+; CHECK-NEXT: store volatile i32 %G
   ret void
 }


@@ -1,4 +1,4 @@
-; RUN: opt < %s -globalopt -S | grep {volatile load}
+; RUN: opt < %s -globalopt -S | grep {load volatile}
 @t0.1441 = internal global double 0x3FD5555555555555, align 8 ; <double*> [#uses=1]
 define double @foo() nounwind {


@@ -1,4 +1,4 @@
-; RUN: opt < %s -instcombine -S | grep {volatile store}
+; RUN: opt < %s -instcombine -S | grep {store volatile}
 define void @test() {
   %votf = alloca <4 x float> ; <<4 x float>*> [#uses=1]


@@ -1,4 +1,4 @@
-; RUN: opt < %s -instcombine -S | grep {volatile load} | count 2
+; RUN: opt < %s -instcombine -S | grep {load volatile} | count 2
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
 target triple = "i386-apple-darwin8"
 @g_1 = internal global i32 0 ; <i32*> [#uses=3]


@@ -1,4 +1,4 @@
-; RUN: opt < %s -instcombine -S | grep {volatile load} | count 2
+; RUN: opt < %s -instcombine -S | grep {load volatile} | count 2
 ; PR2262
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
 target triple = "i386-apple-darwin8"


@@ -1,4 +1,4 @@
-; RUN: opt < %s -instcombine -S | grep {volatile load} | count 2
+; RUN: opt < %s -instcombine -S | grep {load volatile} | count 2
 ; PR2496
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
 target triple = "i386-apple-darwin8"


@@ -96,7 +96,7 @@ define i32 @nogep-multiuse({i32, i32}* %pair) {
 }
 ; CHECK: define i32 @nogep-volatile
-; CHECK-NEXT: volatile load {{.*}} %pair
+; CHECK-NEXT: load volatile {{.*}} %pair
 ; CHECK-NEXT: extractvalue
 ; CHECK-NEXT: ret
 define i32 @nogep-volatile({i32, i32}* %pair) {


@@ -152,9 +152,9 @@ entry:
   ret void
 ; CHECK: @powi
 ; CHECK: %A = fdiv double 1.0{{.*}}, %V
-; CHECK: volatile store double %A,
-; CHECK: volatile store double 1.0
-; CHECK: volatile store double %V
+; CHECK: store volatile double %A,
+; CHECK: store volatile double 1.0
+; CHECK: store volatile double %V
 }
 define i32 @cttz(i32 %a) {
@@ -194,11 +194,11 @@ entry:
 ; CHECK: @cmp.simplify
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: %lz.cmp = icmp eq i32 %a, 0
-; CHECK-NEXT: volatile store i1 %lz.cmp, i1* %c
+; CHECK-NEXT: store volatile i1 %lz.cmp, i1* %c
 ; CHECK-NEXT: %tz.cmp = icmp ne i32 %a, 0
-; CHECK-NEXT: volatile store i1 %tz.cmp, i1* %c
+; CHECK-NEXT: store volatile i1 %tz.cmp, i1* %c
 ; CHECK-NEXT: %pop.cmp = icmp eq i32 %b, 0
-; CHECK-NEXT: volatile store i1 %pop.cmp, i1* %c
+; CHECK-NEXT: store volatile i1 %pop.cmp, i1* %c
 }


@@ -1,5 +1,5 @@
-; RUN: opt < %s -instcombine -S | grep {volatile store}
-; RUN: opt < %s -instcombine -S | grep {volatile load}
+; RUN: opt < %s -instcombine -S | grep {store volatile}
+; RUN: opt < %s -instcombine -S | grep {load volatile}
 @x = weak global i32 0 ; <i32*> [#uses=2]


@@ -1,5 +1,5 @@
 ; RUN: opt < %s -jump-threading -loop-rotate -instcombine -indvars -loop-unroll -simplifycfg -S -verify-dom-info -verify-loop-info > %t
-; RUN: grep {volatile store} %t | count 3
+; RUN: grep {store volatile} %t | count 3
 ; RUN: not grep {br label} %t
 ; Jump threading should not prevent this loop from being unrolled.


@@ -1,4 +1,4 @@
-; RUN: opt < %s -licm -S | grep {volatile store}
+; RUN: opt < %s -licm -S | grep {store volatile}
 ; PR1435
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
 target triple = "i686-apple-darwin8"


@@ -15,7 +15,7 @@ for.body4.lr.ph:
   br label %for.body4
 ; CHECK: for.body4:
-; CHECK: volatile load i16* @g_39
+; CHECK: load volatile i16* @g_39
 for.body4:
   %l_612.11 = phi i32* [ undef, %for.body4.lr.ph ], [ %call19, %for.body4 ]


@@ -65,7 +65,7 @@ Loop:
   br i1 true, label %Out, label %Loop
 ; CHECK: Loop:
-; CHECK-NEXT: volatile load
+; CHECK-NEXT: load volatile
 Out: ; preds = %Loop
   ret void


@@ -25,7 +25,7 @@ entry:
 ; CHECK: define void @test1(i8* %p) {
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: %0 = tail call i8* @objc_retain(i8* %p) nounwind
-; CHECK-NEXT: %tmp = volatile load i8** @x, align 8
+; CHECK-NEXT: %tmp = load volatile i8** @x, align 8
 ; CHECK-NEXT: store i8* %0, i8** @x, align 8
 ; CHECK-NEXT: tail call void @objc_release(i8* %tmp) nounwind
 ; CHECK-NEXT: ret void
@@ -45,7 +45,7 @@ entry:
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: %0 = tail call i8* @objc_retain(i8* %p) nounwind
 ; CHECK-NEXT: %tmp = load i8** @x, align 8
-; CHECK-NEXT: volatile store i8* %0, i8** @x, align 8
+; CHECK-NEXT: store volatile i8* %0, i8** @x, align 8
 ; CHECK-NEXT: tail call void @objc_release(i8* %tmp) nounwind
 ; CHECK-NEXT: ret void
 ; CHECK-NEXT: }


@@ -1,5 +1,5 @@
-; RUN: opt < %s -scalarrepl -S | grep {volatile load}
-; RUN: opt < %s -scalarrepl -S | grep {volatile store}
+; RUN: opt < %s -scalarrepl -S | grep {load volatile}
+; RUN: opt < %s -scalarrepl -S | grep {store volatile}
 define i32 @voltest(i32 %T) {
   %A = alloca {i32, i32}


@@ -11,14 +11,14 @@ entry:
   br i1 %0, label %bb, label %return
 bb: ; preds = %entry
-  %1 = volatile load i32* null
+  %1 = load volatile i32* null
   unreachable
   br label %return
 return: ; preds = %entry
   ret void
 ; CHECK: @test1
-; CHECK: volatile load
+; CHECK: load volatile
 }
 ; rdar://7958343
@@ -35,10 +35,10 @@ entry:
 ; PR7369
 define void @test3() nounwind {
 entry:
-  volatile store i32 4, i32* null
+  store volatile i32 4, i32* null
   ret void
 ; CHECK: @test3
-; CHECK: volatile store i32 4, i32* null
+; CHECK: store volatile i32 4, i32* null
 ; CHECK: ret
 }


@@ -10,26 +10,26 @@ declare i32 @memcmp(i8*, i8*, i32)
 define void @test(i8* %P, i8* %Q, i32 %N, i32* %IP, i1* %BP) {
   %A = call i32 @memcmp( i8* %P, i8* %P, i32 %N ) ; <i32> [#uses=1]
 ; CHECK-NOT: call {{.*}} memcmp
-; CHECK: volatile store
-  volatile store i32 %A, i32* %IP
+; CHECK: store volatile
+  store volatile i32 %A, i32* %IP
   %B = call i32 @memcmp( i8* %P, i8* %Q, i32 0 ) ; <i32> [#uses=1]
 ; CHECK-NOT: call {{.*}} memcmp
-; CHECK: volatile store
-  volatile store i32 %B, i32* %IP
+; CHECK: store volatile
+  store volatile i32 %B, i32* %IP
   %C = call i32 @memcmp( i8* %P, i8* %Q, i32 1 ) ; <i32> [#uses=1]
 ; CHECK: load
 ; CHECK: zext
 ; CHECK: load
 ; CHECK: zext
 ; CHECK: sub
-; CHECK: volatile store
-  volatile store i32 %C, i32* %IP
-  %F = call i32 @memcmp(i8* getelementptr ([4 x i8]* @hel, i32 0, i32 0),
-                        i8* getelementptr ([8 x i8]* @hello_u, i32 0, i32 0),
-                        i32 3)
+; CHECK: store volatile
+  store volatile i32 %C, i32* %IP
+  %F = call i32 @memcmp(i8* getelementptr ([4 x i8]* @hel, i32 0, i32 0),
+                        i8* getelementptr ([8 x i8]* @hello_u, i32 0, i32 0),
+                        i32 3)
 ; CHECK-NOT: call {{.*}} memcmp
-; CHECK: volatile store
-  volatile store i32 %F, i32* %IP
+; CHECK: store volatile
+  store volatile i32 %F, i32* %IP
   ret void
 }