diff --git a/llvm/docs/CompilerWriterInfo.rst b/llvm/docs/CompilerWriterInfo.rst
index 240271a1a478..606b5f5afec8 100644
--- a/llvm/docs/CompilerWriterInfo.rst
+++ b/llvm/docs/CompilerWriterInfo.rst
@@ -113,7 +113,7 @@ XCore
 -----
 
 * `The XMOS XS1 Architecture (ISA) `_
-* `Tools Developement Guide (includes ABI) `_
+* `Tools Development Guide (includes ABI) `_
 
 Other relevant lists
 --------------------
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index fff627b8bb78..752e3578222d 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -6969,7 +6969,7 @@ Semantics:
 
 On platforms with coherent instruction and data caches (e.g. x86), this
 intrinsic is a nop. On platforms with non-coherent instruction and data
-cache (e.g. ARM, MIPS), the intrinsic is lowered either to appropiate
+cache (e.g. ARM, MIPS), the intrinsic is lowered either to appropriate
 instructions or a system call, if cache flushing requires special
 privileges.
 
diff --git a/llvm/include/llvm/CodeGen/PBQP/Graph.h b/llvm/include/llvm/CodeGen/PBQP/Graph.h
index 1b960381c7b9..a55f0ea96c0a 100644
--- a/llvm/include/llvm/CodeGen/PBQP/Graph.h
+++ b/llvm/include/llvm/CodeGen/PBQP/Graph.h
@@ -29,12 +29,12 @@ namespace PBQP {
     typedef unsigned NodeId;
     typedef unsigned EdgeId;
 
-    /// \brief Returns a value representing an invalid (non-existant) node.
+    /// \brief Returns a value representing an invalid (non-existent) node.
     static NodeId invalidNodeId() {
       return std::numeric_limits::max();
     }
 
-    /// \brief Returns a value representing an invalid (non-existant) edge.
+    /// \brief Returns a value representing an invalid (non-existent) edge.
     static EdgeId invalidEdgeId() {
       return std::numeric_limits::max();
     }
diff --git a/llvm/include/llvm/IR/DiagnosticInfo.h b/llvm/include/llvm/IR/DiagnosticInfo.h
index 392c14b76a11..fb3c9b6302dc 100644
--- a/llvm/include/llvm/IR/DiagnosticInfo.h
+++ b/llvm/include/llvm/IR/DiagnosticInfo.h
@@ -229,7 +229,7 @@ private:
   /// Name of the input file associated with this diagnostic.
   const char *FileName;
 
-  /// Line number where the diagnostic occured. If 0, no line number will
+  /// Line number where the diagnostic occurred. If 0, no line number will
   /// be emitted in the message.
   unsigned LineNum;
 
diff --git a/llvm/lib/IR/Use.cpp b/llvm/lib/IR/Use.cpp
index e40f3e994db4..047861c2586f 100644
--- a/llvm/lib/IR/Use.cpp
+++ b/llvm/lib/IR/Use.cpp
@@ -49,7 +49,7 @@ unsigned Use::getOperandNo() const {
   return this - getUser()->op_begin();
 }
 
-// Sets up the waymarking algoritm's tags for a series of Uses. See the
+// Sets up the waymarking algorithm's tags for a series of Uses. See the
 // algorithm details here:
 //
 //   http://www.llvm.org/docs/ProgrammersManual.html#UserLayout
diff --git a/llvm/lib/Target/AArch64/AArch64Schedule.td b/llvm/lib/Target/AArch64/AArch64Schedule.td
index ec8450b9c1fc..6ec47dbaa589 100644
--- a/llvm/lib/Target/AArch64/AArch64Schedule.td
+++ b/llvm/lib/Target/AArch64/AArch64Schedule.td
@@ -59,7 +59,7 @@ def ReadFPALU : SchedRead;
 
 // Floating Point MAC, Mul, Div, Sqrt
 // Most processors will simply send all of these down a dedicated pipe, but
-// they're explicitly seperated here for flexibility of modeling later. May
+// they're explicitly separated here for flexibility of modeling later. May
 // consider consolidating them into a single WriteFPXXXX type in the future.
 def WriteFPMAC : SchedWrite;
 def WriteFPMul : SchedWrite;
diff --git a/llvm/lib/Target/ARM/ARMFrameLowering.cpp b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
index 58d184ce9581..c18e74115a26 100644
--- a/llvm/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
@@ -1675,7 +1675,7 @@ void ARMFrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
   if (MF.getFunction()->isVarArg())
     report_fatal_error("Segmented stacks do not support vararg functions.");
   if (!ST->isTargetAndroid() && !ST->isTargetLinux())
-    report_fatal_error("Segmented stacks not supported on this platfrom.");
+    report_fatal_error("Segmented stacks not supported on this platform.");
 
   MachineBasicBlock &prologueMBB = MF.front();
   MachineFrameInfo *MFI = MF.getFrameInfo();
diff --git a/llvm/lib/Target/ARM64/ARM64AdvSIMDScalarPass.cpp b/llvm/lib/Target/ARM64/ARM64AdvSIMDScalarPass.cpp
index 83f8cdae49c1..83a3883fec83 100644
--- a/llvm/lib/Target/ARM64/ARM64AdvSIMDScalarPass.cpp
+++ b/llvm/lib/Target/ARM64/ARM64AdvSIMDScalarPass.cpp
@@ -14,7 +14,7 @@
 //===----------------------------------------------------------------------===//
 // TODO: Graph based predicate heuristics.
 //       Walking the instruction list linearly will get many, perhaps most, of
-//       the cases, but to do a truly throrough job of this, we need a more
+//       the cases, but to do a truly thorough job of this, we need a more
 //       wholistic approach.
 //
 // This optimization is very similar in spirit to the register allocator's
@@ -74,7 +74,7 @@ private:
   // instruction. "add Xd, Xn, Xm" ==> "add Dd, Da, Db", for example.
   bool isProfitableToTransform(const MachineInstr *MI) const;
 
-  // tranformInstruction - Perform the transformation of an instruction
+  // transformInstruction - Perform the transformation of an instruction
   //  to its equivalant AdvSIMD scalar instruction. Update inputs and outputs
   //  to be the correct register class, minimizing cross-class copies.
   void transformInstruction(MachineInstr *MI);
@@ -252,7 +252,7 @@ bool ARM64AdvSIMDScalar::isProfitableToTransform(const MachineInstr *MI) const {
   if (AllUsesAreCopies)
     --NumNewCopies;
 
-  // If a tranform will not increase the number of cross-class copies required,
+  // If a transform will not increase the number of cross-class copies required,
   // return true.
   if (NumNewCopies <= NumRemovableCopies)
     return true;
@@ -273,7 +273,7 @@ static MachineInstr *insertCopy(const ARM64InstrInfo *TII, MachineInstr *MI,
   return MIB;
 }
 
-// tranformInstruction - Perform the transformation of an instruction
+// transformInstruction - Perform the transformation of an instruction
 //  to its equivalant AdvSIMD scalar instruction. Update inputs and outputs
 //  to be the correct register class, minimizing cross-class copies.
 void ARM64AdvSIMDScalar::transformInstruction(MachineInstr *MI) {
diff --git a/llvm/lib/Target/ARM64/ARM64CollectLOH.cpp b/llvm/lib/Target/ARM64/ARM64CollectLOH.cpp
index 7da500b9b5cb..8a50c860a451 100644
--- a/llvm/lib/Target/ARM64/ARM64CollectLOH.cpp
+++ b/llvm/lib/Target/ARM64/ARM64CollectLOH.cpp
@@ -459,14 +459,14 @@ static void finitReachingDef(BlockToSetOfInstrsPerColor &In,
       delete[] IT->second;
 }
 
-/// Reaching definiton algorithm.
+/// Reaching definition algorithm.
 /// \param MF function on which the algorithm will operate.
 /// \param[out] ColorOpToReachedUses will contain the result of the reaching
 /// def algorithm.
 /// \param ADRPMode specify whether the reaching def algorithm should be tuned
 /// for ADRP optimization. \see initReachingDef for more details.
 /// \param DummyOp if not NULL, the algorithm will work at
-/// basic block scope and will set for every exposed defintion a use to
+/// basic block scope and will set for every exposed definition a use to
 /// @p DummyOp.
 /// \pre ColorOpToReachedUses is an array of at least number of registers of
 /// InstrToInstrs.
@@ -584,7 +584,7 @@ static bool isCandidateStore(const MachineInstr *Instr) {
   return false;
 }
 
-/// Given the result of a reaching defintion algorithm in ColorOpToReachedUses,
+/// Given the result of a reaching definition algorithm in ColorOpToReachedUses,
 /// Build the Use to Defs information and filter out obvious non-LOH candidates.
 /// In ADRPMode, non-LOH candidates are "uses" with non-ADRP definitions.
 /// In non-ADRPMode, non-LOH candidates are "uses" with several definition,
diff --git a/llvm/lib/Target/ARM64/ARM64ConditionalCompares.cpp b/llvm/lib/Target/ARM64/ARM64ConditionalCompares.cpp
index b495afaa38b7..8accfa6eee04 100644
--- a/llvm/lib/Target/ARM64/ARM64ConditionalCompares.cpp
+++ b/llvm/lib/Target/ARM64/ARM64ConditionalCompares.cpp
@@ -147,7 +147,7 @@ public:
   /// else.
   MachineBasicBlock *Head;
 
-  /// The block containing cmp+br.cond with a sucessor shared with Head.
+  /// The block containing cmp+br.cond with a successor shared with Head.
   MachineBasicBlock *CmpBB;
 
   /// The common successor for Head and CmpBB.
@@ -420,7 +420,7 @@ bool SSACCmpConv::canSpeculateInstrs(MachineBasicBlock *MBB,
       return false;
     }
-    // Only CmpMI is alowed to clobber the flags.
+    // Only CmpMI is allowed to clobber the flags.
     if (&*I != CmpMI && I->modifiesRegister(ARM64::CPSR, TRI)) {
       DEBUG(dbgs() << "Clobbers flags: " << *I);
       return false;
     }
diff --git a/llvm/lib/Target/ARM64/ARM64InstrFormats.td b/llvm/lib/Target/ARM64/ARM64InstrFormats.td
index ee8241937168..8f7d6558961a 100644
--- a/llvm/lib/Target/ARM64/ARM64InstrFormats.td
+++ b/llvm/lib/Target/ARM64/ARM64InstrFormats.td
@@ -646,7 +646,7 @@ def simdimmtype10 : Operand,
 
 //---
-// Sytem management
+// System management
 //---
 
 // Base encoding for system instruction operands.
diff --git a/llvm/lib/Target/ARM64/ARM64RegisterInfo.cpp b/llvm/lib/Target/ARM64/ARM64RegisterInfo.cpp
index 095270791c83..45ed25aab1d9 100644
--- a/llvm/lib/Target/ARM64/ARM64RegisterInfo.cpp
+++ b/llvm/lib/Target/ARM64/ARM64RegisterInfo.cpp
@@ -76,7 +76,7 @@ ARM64RegisterInfo::getThisReturnPreservedMask(CallingConv::ID) const {
 BitVector
 ARM64RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
   const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
 
-  // FIXME: avoid re-calculating this everytime.
+  // FIXME: avoid re-calculating this every time.
   BitVector Reserved(getNumRegs());
   Reserved.set(ARM64::SP);
   Reserved.set(ARM64::XZR);
diff --git a/llvm/lib/Target/ARM64/ARM64RegisterInfo.td b/llvm/lib/Target/ARM64/ARM64RegisterInfo.td
index 5f5093579ee5..83a8f70a8c7d 100644
--- a/llvm/lib/Target/ARM64/ARM64RegisterInfo.td
+++ b/llvm/lib/Target/ARM64/ARM64RegisterInfo.td
@@ -164,7 +164,7 @@ def tcGPR64 : RegisterClass<"ARM64", [i64], 64, (sub GPR64common, X19, X20, X21,
                                                  X22, X23, X24, X25, X26,
                                                  X27, X28)>;
 
-// GPR register classes for post increment ammount of vector load/store that
+// GPR register classes for post increment amount of vector load/store that
 // has alternate printing when Rm=31 and prints a constant immediate value
 // equal to the total number of bytes transferred.
 def GPR64pi1 : RegisterOperand;
diff --git a/llvm/lib/Target/ARM64/MCTargetDesc/ARM64MachObjectWriter.cpp b/llvm/lib/Target/ARM64/MCTargetDesc/ARM64MachObjectWriter.cpp
index 1733dc563d1d..f28896c1f9e1 100644
--- a/llvm/lib/Target/ARM64/MCTargetDesc/ARM64MachObjectWriter.cpp
+++ b/llvm/lib/Target/ARM64/MCTargetDesc/ARM64MachObjectWriter.cpp
@@ -222,7 +222,7 @@ void ARM64MachObjectWriter::RecordRelocation(
                                   "difference");
 
     // ARM64 always uses external relocations. If there is no symbol to use as
-    // a base address (a local symbol with no preceeding non-local symbol),
+    // a base address (a local symbol with no preceding non-local symbol),
     // error out.
     //
     // FIXME: We should probably just synthesize an external symbol and use
diff --git a/llvm/lib/Transforms/Scalar/SampleProfile.cpp b/llvm/lib/Transforms/Scalar/SampleProfile.cpp
index 20d6daab24bb..327859bc5cac 100644
--- a/llvm/lib/Transforms/Scalar/SampleProfile.cpp
+++ b/llvm/lib/Transforms/Scalar/SampleProfile.cpp
@@ -315,7 +315,7 @@ protected:
   /// \brief Name of the profile file to load.
   StringRef Filename;
 
-  /// \brief Flag indicating whether the profile input loaded succesfully.
+  /// \brief Flag indicating whether the profile input loaded successfully.
   bool ProfileIsValid;
 };
 }
diff --git a/llvm/test/CodeGen/ARM64/addr-mode-folding.ll b/llvm/test/CodeGen/ARM64/addr-mode-folding.ll
index dff2331d29bb..08fb8c90c484 100644
--- a/llvm/test/CodeGen/ARM64/addr-mode-folding.ll
+++ b/llvm/test/CodeGen/ARM64/addr-mode-folding.ll
@@ -6,7 +6,7 @@
 define i32 @fct(i32 %i1, i32 %i2) {
 ; CHECK: @fct
 ; Sign extension is used more than once, thus it should not be folded.
-; CodeGenPrepare is not sharing sext accross uses, thus this is folded because
+; CodeGenPrepare is not sharing sext across uses, thus this is folded because
 ; of that.
 ; _CHECK-NOT_: , sxtw]
 entry:
diff --git a/llvm/test/CodeGen/ARM64/build-vector.ll b/llvm/test/CodeGen/ARM64/build-vector.ll
index 1d137ae6e6d1..143d6894383f 100644
--- a/llvm/test/CodeGen/ARM64/build-vector.ll
+++ b/llvm/test/CodeGen/ARM64/build-vector.ll
@@ -6,7 +6,7 @@ define void @one_lane(i32* nocapture %out_int, i32 %skip0) nounwind {
 ; CHECK-LABEL: one_lane:
 ; CHECK: dup.16b v[[REG:[0-9]+]], wzr
 ; CHECK-NEXT: ins.b v[[REG]][0], w1
-; v and q are aliases, and str is prefered against st.16b when possible
+; v and q are aliases, and str is preferred against st.16b when possible
 ; rdar://11246289
 ; CHECK: str q[[REG]], [x0]
 ; CHECK: ret
diff --git a/llvm/test/DebugInfo/X86/dbg-value-const-byref.ll b/llvm/test/DebugInfo/X86/dbg-value-const-byref.ll
index 3d1e87da8c37..23dead3f8593 100644
--- a/llvm/test/DebugInfo/X86/dbg-value-const-byref.ll
+++ b/llvm/test/DebugInfo/X86/dbg-value-const-byref.ll
@@ -14,7 +14,7 @@
 ; }
 ;
 ; Test that we generate valid debug info for optimized code,
-; particularily variables that are described as constants and passed
+; particularly variables that are described as constants and passed
 ; by reference.
 ; rdar://problem/14874886
 ;
diff --git a/llvm/test/MC/ARM64/separator.s b/llvm/test/MC/ARM64/separator.s
index 18f34b99a0c4..e67deba825d9 100644
--- a/llvm/test/MC/ARM64/separator.s
+++ b/llvm/test/MC/ARM64/separator.s
@@ -1,6 +1,6 @@
 ; RUN: llvm-mc -triple arm64-apple-darwin -show-encoding < %s | FileCheck %s
 
-; ARM64 uses a multi-character statment separator, "%%". Check that we lex
+; ARM64 uses a multi-character statement separator, "%%". Check that we lex
 ; it properly and recognize the multiple assembly statements on the line.
 
 ; To make sure the output assembly correctly handled the instructions,
diff --git a/llvm/test/MC/Mips/micromips-el-fixup-data.s b/llvm/test/MC/Mips/micromips-el-fixup-data.s
index 2293f63d46de..47538356bf37 100644
--- a/llvm/test/MC/Mips/micromips-el-fixup-data.s
+++ b/llvm/test/MC/Mips/micromips-el-fixup-data.s
@@ -2,7 +2,7 @@
 # RUN:   -mattr=+micromips 2>&1 -filetype=obj > %t.o
 # RUN: llvm-objdump %t.o -triple mipsel -mattr=+micromips -d | FileCheck %s
 
-# Check that fixup data is writen in the microMIPS specific little endian
+# Check that fixup data is written in the microMIPS specific little endian
 # byte order.
 
     .text