No functionality change. Reflow lines that could fit on one line. Break lines
that had 80-column violations. Remove spurious emacs mode markers on .cpp files.

llvm-svn: 191797
Nick Lewycky 2013-10-01 21:51:38 +00:00
parent 8f5225b3a7
commit 5fa40c3b9e
7 changed files with 20 additions and 29 deletions
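
For readers unfamiliar with the "spurious emacs mode markers" mentioned above: LLVM-style file banners sometimes end in an emacs file-variable hint, -*- C++ -*-, which tells emacs which major mode to use. The hint is only needed in headers, where the mode cannot be inferred from the file extension, so it is redundant in .cpp files. Below is a minimal sketch of the change using a hypothetical Example.cpp banner (not one of the files touched by this commit):

// Before: the emacs mode marker sits at the end of the banner line.
//===--- Example.cpp - A hypothetical banner --------------------*- C++ -*-===//

// After: marker removed; dashes re-padded so the banner stays 80 columns.
//===--- Example.cpp - A hypothetical banner ------------------------------===//

The CGCXXABI.cpp and CGCall.cpp hunks below apply the same transformation to real banners.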


@@ -470,8 +470,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
              getContext().getSizeType());
   }
   // Atomic address is the first or second parameter
-  Args.add(RValue::get(EmitCastToVoidPtr(Ptr)),
-           getContext().VoidPtrTy);
+  Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);
   std::string LibCallName;
   QualType RetTy;
@@ -494,8 +493,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
     Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
              getContext().VoidPtrTy);
     AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy);
-    Args.add(RValue::get(Order),
-             getContext().IntTy);
+    Args.add(RValue::get(Order), getContext().IntTy);
     Order = OrderFail;
     break;
   // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
@@ -873,8 +871,7 @@ llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
 /// Note that the r-value is expected to be an r-value *of the atomic
 /// type*; this means that for aggregate r-values, it should include
 /// storage for any padding that was necessary.
-void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
-                                      bool isInit) {
+void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
   // If this is an aggregate r-value, it should agree in type except
   // maybe for address-space qualification.
   assert(!rvalue.isAggregate() ||


@@ -1,4 +1,4 @@
-//===----- CGCXXABI.cpp - Interface to C++ ABIs -----------------*- C++ -*-===//
+//===----- CGCXXABI.cpp - Interface to C++ ABIs ---------------------------===//
 //
 // The LLVM Compiler Infrastructure
 //


@@ -1,4 +1,4 @@
-//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
+//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
 //
 // The LLVM Compiler Infrastructure
 //
@@ -1322,9 +1322,11 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
       if (isPromoted)
         V = emitArgumentDemotion(*this, Arg, V);
-      if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) {
+      if (const CXXMethodDecl *MD =
+          dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) {
         if (MD->isVirtual() && Arg == CXXABIThisDecl)
-          V = CGM.getCXXABI().adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V);
+          V = CGM.getCXXABI().
+              adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V);
       }
       // Because of merging of function types from multiple decls it is


@@ -1164,8 +1164,7 @@ llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
                                         bool Volatile, unsigned Alignment,
-                                        QualType Ty,
-                                        llvm::MDNode *TBAAInfo,
+                                        QualType Ty, llvm::MDNode *TBAAInfo,
                                         bool isInit, QualType TBAABaseType,
                                         uint64_t TBAAOffset) {
@@ -1179,14 +1178,11 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
       // Our source is a vec3, do a shuffle vector to make it a vec4.
       SmallVector<llvm::Constant*, 4> Mask;
-      Mask.push_back(llvm::ConstantInt::get(
-                                            llvm::Type::getInt32Ty(VMContext),
+      Mask.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                                             0));
-      Mask.push_back(llvm::ConstantInt::get(
-                                            llvm::Type::getInt32Ty(VMContext),
+      Mask.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                                             1));
-      Mask.push_back(llvm::ConstantInt::get(
-                                            llvm::Type::getInt32Ty(VMContext),
+      Mask.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                                             2));
       Mask.push_back(llvm::UndefValue::get(llvm::Type::getInt32Ty(VMContext)));
@@ -1341,7 +1337,8 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
 /// EmitStoreThroughLValue - Store the specified rvalue into the specified
 /// lvalue, where both are guaranteed to the have the same type, and that type
 /// is 'Ty'.
-void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit) {
+void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
+                                             bool isInit) {
   if (!Dst.isSimple()) {
     if (Dst.isVectorElt()) {
       // Read/modify/write the vector, inserting the new element.


@@ -272,8 +272,8 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
   // And the rest of the call args
   EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
-  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required), Callee,
-                  ReturnValue, Args);
+  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
+                  Callee, ReturnValue, Args);
 }
 RValue
@@ -441,8 +441,7 @@ CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
   assert(!getContext().getAsConstantArrayType(E->getType())
          && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
-  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src,
-                                 E->arg_begin(), E->arg_end());
+  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E->arg_begin(), E->arg_end());
 }
 static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,


@@ -1048,8 +1048,6 @@ static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
                                      FunctionType::ExtInfo(),
                                      RequiredArgs::All),
                copyCppAtomicObjectFn, ReturnValueSlot(), args);
-
-
 }
@@ -2784,8 +2782,7 @@ CodeGenFunction::EmitARCStoreStrong(const BinaryOperator *e,
   // If the RHS was emitted retained, expand this.
   if (hasImmediateRetain) {
-    llvm::Value *oldValue =
-      EmitLoadOfScalar(lvalue);
+    llvm::Value *oldValue = EmitLoadOfScalar(lvalue);
     EmitStoreOfScalar(value, lvalue);
     EmitARCRelease(oldValue, lvalue.isARCPreciseLifetime());
   } else {


@@ -1439,8 +1439,7 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
   imp = EnforceType(Builder, imp, MSI.MessengerType);
   llvm::Instruction *call;
-  RValue msgRet = CGF.EmitCall(MSI.CallInfo, imp, Return, ActualArgs,
-                               0, &call);
+  RValue msgRet = CGF.EmitCall(MSI.CallInfo, imp, Return, ActualArgs, 0, &call);
   call->setMetadata(msgSendMDKind, node);