[NFC] fix trivial typos in comments and documents

"to to" -> "to"

llvm-svn: 323628
Hiroshi Inoue 2018-01-29 05:17:03 +00:00
parent 1019f8a98e
commit c8e9245816
23 changed files with 23 additions and 23 deletions


@ -324,7 +324,7 @@ However, some Linux distributions and some other or older BSDs sometimes have
extremely old versions of GCC. These steps attempt to help you upgrade your
compiler even on such a system. However, if at all possible, we encourage you
to use a recent version of a distribution with a modern system compiler that
meets these requirements. Note that it is tempting to to install a prior
meets these requirements. Note that it is tempting to install a prior
version of Clang and libc++ to be the host compiler, however libc++ was not
well tested or set up to build on Linux until relatively recently. As
a consequence, this guide suggests just using libstdc++ and a modern GCC as the


@ -44,7 +44,7 @@ returns the target triple of the current machine.
auto TargetTriple = sys::getDefaultTargetTriple();
LLVM doesn't require us to to link in all the target
LLVM doesn't require us to link in all the target
functionality. For example, if we're just using the JIT, we don't need
the assembly printers. Similarly, if we're only targeting certain
architectures, we can only link in the functionality for those

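Aside: a minimal standalone sketch (not part of this commit) of the point the tutorial text above makes — only the native target is initialized, so only that target's functionality has to be linked in. The header paths are those used by LLVM of this era and the program itself is purely illustrative.

#include "llvm/Support/Host.h"           // sys::getDefaultTargetTriple()
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  // Initialize only the target we are running on, not every backend LLVM knows about.
  llvm::InitializeNativeTarget();
  llvm::InitializeNativeTargetAsmPrinter();

  std::string TargetTriple = llvm::sys::getDefaultTargetTriple();
  std::string Error;
  const llvm::Target *T = llvm::TargetRegistry::lookupTarget(TargetTriple, Error);
  if (!T) {
    llvm::errs() << Error << '\n';
    return 1;
  }
  llvm::outs() << "default target triple: " << TargetTriple << '\n';
  return 0;
}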

@ -275,7 +275,7 @@ protected:
enqueueUsers(GEPI);
}
// No-op intrinsics which we know don't escape the pointer to to logic in
// No-op intrinsics which we know don't escape the pointer to logic in
// some other function.
void visitDbgInfoIntrinsic(DbgInfoIntrinsic &I) {}
void visitMemIntrinsic(MemIntrinsic &I) {}


@ -678,7 +678,7 @@ public:
const SCEV *LHS, const SCEV *RHS);
/// Test whether the backedge of the loop is protected by a conditional
/// between LHS and RHS. This is used to to eliminate casts.
/// between LHS and RHS. This is used to eliminate casts.
bool isLoopBackedgeGuardedByCond(const Loop *L, ICmpInst::Predicate Pred,
const SCEV *LHS, const SCEV *RHS);


@ -22,7 +22,7 @@
/// of an instruction should live. It asks the target which banks may be
/// used for each operand of the instruction and what is the cost. Then,
/// it chooses the solution which minimizes the cost of the instruction plus
/// the cost of any move that may be needed to to the values into the right
/// the cost of any move that may be needed to the values into the right
/// register bank.
/// In other words, the cost for an instruction on a register bank RegBank
/// is: Cost of I on RegBank plus the sum of the cost for bringing the

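For illustration, a self-contained sketch (not the actual RegBankSelect code) of the cost function the comment above describes: each candidate bank is charged the instruction's cost on that bank plus the cost of every cross-bank copy needed to bring the operands in.

#include <cstdint>
#include <limits>
#include <vector>

struct BankCandidate {
  uint64_t InstrCost;               // cost of I on this register bank
  std::vector<uint64_t> CopyCosts;  // cost of moving each operand into this bank (0 if already there)
};

// Pick the candidate bank with the smallest total cost.
size_t pickCheapestBank(const std::vector<BankCandidate> &Candidates) {
  size_t Best = 0;
  uint64_t BestCost = std::numeric_limits<uint64_t>::max();
  for (size_t I = 0; I != Candidates.size(); ++I) {
    uint64_t Cost = Candidates[I].InstrCost;
    for (uint64_t C : Candidates[I].CopyCosts)
      Cost += C;                    // sum of the moves this mapping would require
    if (Cost < BestCost) {
      BestCost = Cost;
      Best = I;
    }
  }
  return Best;
}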

@ -43,7 +43,7 @@ class ToolOutputFile {
raw_fd_ostream OS;
public:
/// This constructor's arguments are passed to to raw_fd_ostream's
/// This constructor's arguments are passed to raw_fd_ostream's
/// constructor.
ToolOutputFile(StringRef Filename, std::error_code &EC,
sys::fs::OpenFlags Flags);

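A short usage sketch for the constructor mentioned above; the filename is made up, and sys::fs::F_None is the open-flag spelling used in LLVM of this period.

#include "llvm/Support/FileSystem.h"
#include "llvm/Support/ToolOutputFile.h"
#include "llvm/Support/raw_ostream.h"
#include <system_error>

int main() {
  std::error_code EC;
  // Filename, error code and flags are forwarded to the underlying raw_fd_ostream.
  llvm::ToolOutputFile Out("example-output.txt", EC, llvm::sys::fs::F_None);
  if (EC) {
    llvm::errs() << "could not open file: " << EC.message() << '\n';
    return 1;
  }
  Out.os() << "hello\n";  // write through the wrapped raw_fd_ostream
  Out.keep();             // otherwise the file is removed when Out is destroyed
  return 0;
}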

@ -7267,7 +7267,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
continue;
// If this is a memory input, and if the operand is not indirect, do what we
// need to to provide an address for the memory input.
// need to provide an address for the memory input.
if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
!OpInfo.isIndirect) {
assert((OpInfo.isMultipleAlternative ||


@ -1985,7 +1985,7 @@ SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, SL, MVT::i32);
SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask);
// Extend back to to 64-bits.
// Extend back to 64-bits.
SDValue SignBit64 = DAG.getBuildVector(MVT::v2i32, SL, {Zero, SignBit});
SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64);

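The hunk above pulls the sign bit out of the high half of an f64 and widens it back into a 64-bit value. A plain scalar equivalent (an illustration of the bit manipulation only, not the SelectionDAG code):

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  double D = -3.75;
  uint64_t Bits;
  std::memcpy(&Bits, &D, sizeof(Bits));                      // reinterpret the f64 as i64 (like ISD::BITCAST)

  uint32_t Hi = static_cast<uint32_t>(Bits >> 32);           // high 32 bits of the double
  uint32_t SignBit = Hi & (UINT32_C(1) << 31);               // AND with the sign-bit mask
  uint64_t SignBit64 = static_cast<uint64_t>(SignBit) << 32; // extend back to 64 bits

  std::printf("sign mask = 0x%016llx\n", static_cast<unsigned long long>(SignBit64));
  return 0;
}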

@ -79,7 +79,7 @@ static cl::opt<bool> EnableLoadStoreVectorizer(
cl::init(true),
cl::Hidden);
// Option to to control global loads scalarization
// Option to control global loads scalarization
static cl::opt<bool> ScalarizeGlobal(
"amdgpu-scalarize-global-loads",
cl::desc("Enable global load scalarization"),

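For readers unfamiliar with cl::opt, a minimal sketch of how an option like the one above is declared and read; the option name here is invented for the example.

#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"

static llvm::cl::opt<bool> ExampleFlag(
    "example-flag",
    llvm::cl::desc("Example boolean option"),
    llvm::cl::init(false),
    llvm::cl::Hidden);

int main(int argc, char **argv) {
  llvm::cl::ParseCommandLineOptions(argc, argv);
  // Once parsing has run, a cl::opt converts implicitly to its value type.
  llvm::outs() << "example-flag = " << (ExampleFlag ? "true" : "false") << '\n';
  return 0;
}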

@ -452,7 +452,7 @@ unsigned GCNIterativeScheduler::tryMaximizeOccupancy(unsigned TargetOcc) {
// TODO: assert Regions are sorted descending by pressure
const auto &ST = MF.getSubtarget<SISubtarget>();
const auto Occ = Regions.front()->MaxPressure.getOccupancy(ST);
DEBUG(dbgs() << "Trying to to improve occupancy, target = " << TargetOcc
DEBUG(dbgs() << "Trying to improve occupancy, target = " << TargetOcc
<< ", current = " << Occ << '\n');
auto NewOcc = TargetOcc;


@ -1837,7 +1837,7 @@ bool SIInsertWaitcnts::runOnMachineFunction(MachineFunction &MF) {
if (!MFI->isEntryFunction()) {
// Wait for any outstanding memory operations that the input registers may
// depend on. We can't track them and it's better to to the wait after the
// depend on. We can't track them and it's better to the wait after the
// costly call sequence.
// TODO: Could insert earlier and schedule more liberally with operations


@ -687,7 +687,7 @@ bool SIInsertWaits::runOnMachineFunction(MachineFunction &MF) {
if (!MFI->isEntryFunction()) {
// Wait for any outstanding memory operations that the input registers may
// depend on. We can't track them and it's better to to the wait after the
// depend on. We can't track them and it's better to the wait after the
// costly call sequence.
// TODO: Could insert earlier and schedule more liberally with operations


@ -219,7 +219,7 @@ static Optional<PPC::Predicate> getComparePred(CmpInst::Predicate Pred) {
// result consists of 4 bits, indicating lt, eq, gt and un (unordered),
// only one of which will be set. The result is generated by fcmpu
// instruction. However, bc instruction only inspects one of the first 3
// bits, so when un is set, bc instruction may jump to to an undesired
// bits, so when un is set, bc instruction may jump to an undesired
// place.
//
// More specifically, if we expect an unordered comparison and un is set, we

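The comment above hinges on a floating-point subtlety that is easy to demonstrate in plain C++ (this illustrates the comparison semantics only, not PPC codegen): when a compare is unordered because a NaN is involved, lt, eq and gt are all false, so "not lt" is not the same condition as "ge".

#include <cmath>
#include <cstdio>

int main() {
  double A = std::nan("");
  double B = 1.0;

  bool Lt = (A < B);   // false: the comparison is unordered
  bool Ge = (A >= B);  // also false for the same reason
  // !(A < B) is true here, yet A >= B is false, so a branch that only
  // inspects the "lt" result would also be taken in the unordered case.
  std::printf("lt=%d ge=%d !lt=%d\n", Lt, Ge, !Lt);
  return 0;
}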

@ -2675,7 +2675,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
(FrameReg == X86::EBP && VT == MVT::i32)) &&
"Invalid Frame Register!");
// Always make a copy of the frame register to to a vreg first, so that we
// Always make a copy of the frame register to a vreg first, so that we
// never directly reference the frame register (the TwoAddressInstruction-
// Pass doesn't like that).
unsigned SrcReg = createResultReg(RC);


@ -13917,7 +13917,7 @@ static SDValue lowerV4X128VectorShuffle(const SDLoc &DL, MVT VT,
return insert128BitVector(V1, Subvec, V2Index * 2, DAG, DL);
}
// Try to lower to to vshuf64x2/vshuf32x4.
// Try to lower to vshuf64x2/vshuf32x4.
SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
unsigned PermMask = 0;
// Ensure elements came from the same Op.


@ -570,7 +570,7 @@ private:
// The idea is inspired from:
// "Partial Redundancy Elimination in SSA Form"
// ROBERT KENNEDY, SUN CHAN, SHIN-MING LIU, RAYMOND LO, PENG TU and FRED CHOW
// They use similar idea in the forward graph to to find fully redundant and
// They use similar idea in the forward graph to find fully redundant and
// partially redundant expressions, here it is used in the inverse graph to
// find fully anticipable instructions at merge point (post-dominator in
// the inverse CFG).


@ -1675,7 +1675,7 @@ void LoopIdiomRecognize::transformLoopToPopcount(BasicBlock *PreCondBB,
}
// Step 3: Note that the population count is exactly the trip count of the
// loop in question, which enable us to to convert the loop from noncountable
// loop in question, which enable us to convert the loop from noncountable
// loop into a countable one. The benefit is twofold:
//
// - If the loop only counts population, the entire loop becomes dead after

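A sketch of the transformation the comment above describes (illustrative only, not the pass itself): the "clear the lowest set bit" loop runs exactly popcount(X) times, so the trip count can be computed up front and the loop becomes countable. __builtin_popcountll is the GCC/Clang builtin that lowers to the ctpop intrinsic.

#include <cstdint>
#include <cstdio>

// Before: the loop is not countable; it exits whenever X reaches zero.
uint64_t countBitsLoop(uint64_t X) {
  uint64_t Count = 0;
  while (X != 0) {
    X &= X - 1;  // clear the lowest set bit
    ++Count;
  }
  return Count;
}

// After: the trip count is simply the population count, computed outside the loop.
uint64_t countBitsPopcount(uint64_t X) {
  return static_cast<uint64_t>(__builtin_popcountll(X));
}

int main() {
  uint64_t V = 0xF0F0F0F0ULL;
  std::printf("%llu %llu\n",
              static_cast<unsigned long long>(countBitsLoop(V)),
              static_cast<unsigned long long>(countBitsPopcount(V)));
  return 0;
}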

@ -22,7 +22,7 @@ define amdgpu_kernel void @test_no_round_size_1(i8 addrspace(1)* %out, i8 addrsp
ret void
}
; There are two objects, so one requires padding to to be correctly
; There are two objects, so one requires padding to be correctly
; aligned after the other.
; (38 -> 48) + 38 = 92


@ -1,4 +1,4 @@
; The purpose of this test to to verify that the fltused symbol is emitted when
; The purpose of this test to verify that the fltused symbol is emitted when
; any function is called with floating point arguments on Windows. And that it
; is not emitted otherwise.


@ -1,4 +1,4 @@
; The purpose of this test to to verify that the fltused symbol is emitted when
; The purpose of this test to verify that the fltused symbol is emitted when
; any function is called with floating point arguments on Windows. And that it
; is not emitted otherwise.


@ -8,7 +8,7 @@
// Test that on ELF:
// 1. the debug info has a relocation to debug_abbrev and one to to debug_line.
// 1. the debug info has a relocation to debug_abbrev and one to debug_line.
// 2. the debug_aranges has relocations to text and debug_line.


@ -51,7 +51,7 @@ entry:
; The 'test2_' prefixed functions test that we can discover the last callsite
; bonus after having inlined the prior call site. For this to to work, we need
; bonus after having inlined the prior call site. For this to work, we need
; a callsite dependent cost so we have a trivial predicate guarding all the
; cost, and set that in a particular direction.


@ -496,7 +496,7 @@ class ThreadLocalRegistryImpl {
FALSE,
thread_id);
GTEST_CHECK_(thread != NULL);
// We need to to pass a valid thread ID pointer into CreateThread for it
// We need to pass a valid thread ID pointer into CreateThread for it
// to work correctly under Win98.
DWORD watcher_thread_id;
HANDLE watcher_thread = ::CreateThread(