Fix typos

llvm-svn: 210401
This commit is contained in:
Alp Toker 2014-06-07 21:23:09 +00:00
parent 17dd8efe9e
commit 5c53639492
6 changed files with 8 additions and 8 deletions

View File

@ -1414,7 +1414,7 @@ void DwarfUnit::applySubprogramAttributes(DISubprogram SP, DIE &SPDie) {
if (DISubprogram SPDecl = SP.getFunctionDeclaration()) { if (DISubprogram SPDecl = SP.getFunctionDeclaration()) {
DeclDie = getDIE(SPDecl); DeclDie = getDIE(SPDecl);
assert(DeclDie && "This DIE should've already been constructed when the " assert(DeclDie && "This DIE should've already been constructed when the "
"definition DIE was creaeted in " "definition DIE was created in "
"getOrCreateSubprogramDIE"); "getOrCreateSubprogramDIE");
DeclLinkageName = SPDecl.getLinkageName(); DeclLinkageName = SPDecl.getLinkageName();
} }

View File

@ -160,7 +160,7 @@ bool PEI::runOnMachineFunction(MachineFunction &Fn) {
replaceFrameIndices(Fn); replaceFrameIndices(Fn);
// If register scavenging is needed, as we've enabled doing it as a // If register scavenging is needed, as we've enabled doing it as a
// post-pass, scavenge the virtual registers that frame index elimiation // post-pass, scavenge the virtual registers that frame index elimination
// inserted. // inserted.
if (TRI->requiresRegisterScavenging(Fn) && FrameIndexVirtualScavenging) if (TRI->requiresRegisterScavenging(Fn) && FrameIndexVirtualScavenging)
scavengeFrameVirtualRegs(Fn); scavengeFrameVirtualRegs(Fn);

View File

@ -146,7 +146,7 @@ bool NVPTXImageOptimizer::replaceIsTypePTexture(Instruction &I) {
void NVPTXImageOptimizer::replaceWith(Instruction *From, ConstantInt *To) { void NVPTXImageOptimizer::replaceWith(Instruction *From, ConstantInt *To) {
// We implement "poor man's DCE" here to make sure any code that is no longer // We implement "poor man's DCE" here to make sure any code that is no longer
// live is actually unreachable and can be trivially eliminated by the // live is actually unreachable and can be trivially eliminated by the
// unreachable block elimiation pass. // unreachable block elimination pass.
for (CallInst::use_iterator UI = From->use_begin(), UE = From->use_end(); for (CallInst::use_iterator UI = From->use_begin(), UE = From->use_end();
UI != UE; ++UI) { UI != UE; ++UI) {
if (BranchInst *BI = dyn_cast<BranchInst>(*UI)) { if (BranchInst *BI = dyn_cast<BranchInst>(*UI)) {

View File

@ -3,7 +3,7 @@
; clang -Oz -c test1.cpp -emit-llvm -S -o ; clang -Oz -c test1.cpp -emit-llvm -S -o
; Verify that we generate shld instruction when we are optimizing for size, ; Verify that we generate shld instruction when we are optimizing for size,
; even for X86_64 processors that are known to have poor latency double ; even for X86_64 processors that are known to have poor latency double
; precision shift instuctions. ; precision shift instructions.
; uint64_t lshift10(uint64_t a, uint64_t b) ; uint64_t lshift10(uint64_t a, uint64_t b)
; { ; {
; return (a << 10) | (b >> 54); ; return (a << 10) | (b >> 54);
@ -25,7 +25,7 @@ attributes #0 = { minsize nounwind optsize readnone uwtable "less-precise-fpmad"
; clang -Os -c test2.cpp -emit-llvm -S ; clang -Os -c test2.cpp -emit-llvm -S
; Verify that we generate shld instruction when we are optimizing for size, ; Verify that we generate shld instruction when we are optimizing for size,
; even for X86_64 processors that are known to have poor latency double ; even for X86_64 processors that are known to have poor latency double
; precision shift instuctions. ; precision shift instructions.
; uint64_t lshift11(uint64_t a, uint64_t b) ; uint64_t lshift11(uint64_t a, uint64_t b)
; { ; {
; return (a << 11) | (b >> 53); ; return (a << 11) | (b >> 53);
@ -46,7 +46,7 @@ attributes #1 = { nounwind optsize readnone uwtable "less-precise-fpmad"="false"
; clang -O2 -c test2.cpp -emit-llvm -S ; clang -O2 -c test2.cpp -emit-llvm -S
; Verify that we do not generate shld instruction when we are not optimizing ; Verify that we do not generate shld instruction when we are not optimizing
; for size for X86_64 processors that are known to have poor latency double ; for size for X86_64 processors that are known to have poor latency double
; precision shift instuctions. ; precision shift instructions.
; uint64_t lshift12(uint64_t a, uint64_t b) ; uint64_t lshift12(uint64_t a, uint64_t b)
; { ; {
; return (a << 12) | (b >> 52); ; return (a << 12) | (b >> 52);

View File

@ -123,7 +123,7 @@
; CHECK: NULL ; CHECK: NULL
; CHECK-NOT: {{DW_TAG|NULL}} ; CHECK-NOT: {{DW_TAG|NULL}}
; FIXME: We probably shouldn't bother describing the implicit ; FIXME: We probably shouldn't bother describing the implicit
; import of the preceeding anonymous namespace. This should be fixed ; import of the preceding anonymous namespace. This should be fixed
; in clang. ; in clang.
; CHECK: DW_TAG_imported_module ; CHECK: DW_TAG_imported_module
; CHECK-NOT: {{DW_TAG|NULL}} ; CHECK-NOT: {{DW_TAG|NULL}}

View File

@ -4,7 +4,7 @@
foo: foo:
;----------------------------------------------------------------------------- ;-----------------------------------------------------------------------------
; Simple encodings (instuctions w/ no operands) ; Simple encodings (instructions w/ no operands)
;----------------------------------------------------------------------------- ;-----------------------------------------------------------------------------
nop nop