When a set of bitmask operations, typically from a bitfield initialization, only modifies the low bytes of a value, we can narrow the store to overwrite only the affected bytes.

llvm-svn: 111568
Owen Anderson 2010-08-19 22:15:40 +00:00
parent be77e3bd6e
commit bb723b228a
2 changed files with 66 additions and 0 deletions
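
For context, the load/mask/store chains this commit targets come from bitfield initializers. A hypothetical reconstruction of the C++ behind the new test (the original source is not part of the commit) looks roughly like this:

class A {
  unsigned a : 3; // bits 0-2: cleared by 'and i32 %1, -8'
  unsigned b : 3; // bits 3-5: 'and ..., -57' then 'or ..., 8' writes b = 1
  unsigned c : 1; // bit 6:    cleared by 'and ..., -65'
  unsigned d : 1; // bit 7:    cleared by 'and ..., -129'
public:
  A() : a(0), b(1), c(0), d(0) {}
};

Each field assignment loads the whole 32-bit unit, masks it, and stores it back, yet only the low byte is ever modified, so the final store can be narrowed to an i8 store.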

@@ -14,11 +14,13 @@
 #include "InstCombine.h"
 #include "llvm/IntrinsicInst.h"
 #include "llvm/Analysis/Loads.h"
+#include "llvm/Support/PatternMatch.h"
 #include "llvm/Target/TargetData.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include "llvm/Transforms/Utils/Local.h"
 #include "llvm/ADT/Statistic.h"
 using namespace llvm;
+using namespace PatternMatch;
 
 STATISTIC(NumDeadStore, "Number of dead stores eliminated");
@@ -473,6 +475,49 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
   if (SI.isVolatile()) return 0;  // Don't hack volatile stores.
+  // Attempt to narrow sequences where we load a wide value, perform bitmasks
+  // that only affect the low bits of it, and then store it back.  This
+  // typically arises from bitfield initializers in C++.
+  ConstantInt *CI1 = 0, *CI2 = 0;
+  Value *Ld = 0;
+  if (getTargetData() &&
+      match(SI.getValueOperand(),
+            m_And(m_Or(m_Value(Ld), m_ConstantInt(CI1)), m_ConstantInt(CI2))) &&
+      isa<LoadInst>(Ld) &&
+      equivalentAddressValues(cast<LoadInst>(Ld)->getPointerOperand(), Ptr)) {
+    APInt OrMask = CI1->getValue();
+    APInt AndMask = CI2->getValue();
+
+    // Compute the prefix of the value that is unmodified by the bitmasking.
+    unsigned LeadingAndOnes = AndMask.countLeadingOnes();
+    unsigned LeadingOrZeros = OrMask.countLeadingZeros();
+    unsigned Prefix = std::min(LeadingAndOnes, LeadingOrZeros);
+    uint64_t NewWidth = AndMask.getBitWidth() - Prefix;
+    if (!isPowerOf2_64(NewWidth)) NewWidth = NextPowerOf2(NewWidth);
+
+    // If we can find a power-of-2 prefix (and if the values we're working with
+    // are themselves POT widths), then we can narrow the store.  We rely on
+    // later iterations of instcombine to propagate the demanded bits to narrow
+    // the other computations in the chain.
+    if (NewWidth < AndMask.getBitWidth() &&
+        isPowerOf2_64(AndMask.getBitWidth())) {
+      const Type *NewType = IntegerType::get(Ptr->getContext(), NewWidth);
+      const Type *NewPtrType = PointerType::getUnqual(NewType);
+
+      Value *NewVal = Builder->CreateTrunc(SI.getValueOperand(), NewType);
+      Value *NewPtr = Builder->CreateBitCast(Ptr, NewPtrType);
+
+      // On big endian targets, we need to offset from the original pointer
+      // in order to store to the low-bit suffix.
+      if (getTargetData()->isBigEndian()) {
+        uint64_t GEPOffset = (AndMask.getBitWidth() - NewWidth) / 8;
+        NewPtr = Builder->CreateConstGEP1_64(NewPtr, GEPOffset);
+      }
+
+      return new StoreInst(NewVal, NewPtr);
+    }
+  }
+
   // store X, null -> turns into 'unreachable' in SimplifyCFG
   if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
     if (!isa<UndefValue>(Val)) {
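
To make the prefix computation concrete, here is a standalone sketch of the same arithmetic on the masks from the test below, once earlier instcombine iterations have folded the chain down to a single and/or pair (plain uint32_t and C++20 <bit> standing in for APInt; hypothetical, not part of the commit):

#include <algorithm>
#include <bit>
#include <cstdint>
#include <cstdio>

int main() {
  uint32_t OrMask  = 8;           // from 'or ..., 8'
  uint32_t AndMask = 0xFFFFFFBFu; // -65, from 'and ..., -65': clears only bit 6

  unsigned LeadingAndOnes = std::countl_one(AndMask);         // 25 high bits kept
  unsigned LeadingOrZeros = std::countl_zero(OrMask);         // 28 high bits kept
  unsigned Prefix = std::min(LeadingAndOnes, LeadingOrZeros); // 25 unmodified bits
  uint64_t NewWidth = 32 - Prefix;                            // 7 bits touched
  if (!std::has_single_bit(NewWidth))
    NewWidth = std::bit_ceil(NewWidth);                       // round up to 8

  // The i32 store narrows to an i8 store; on a big-endian target the
  // pointer would additionally be offset by (32 - 8) / 8 = 3 bytes.
  std::printf("narrowed store width: %llu bits\n",
              (unsigned long long)NewWidth);
}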

@@ -0,0 +1,21 @@
+; RUN: opt -S -instcombine %s | not grep and
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target triple = "x86_64-apple-darwin10.0.0"
+
+%class.A = type { i8, [3 x i8] }
+
+define void @_ZN1AC2Ev(%class.A* %this) nounwind ssp align 2 {
+entry:
+  %0 = bitcast %class.A* %this to i32*    ; <i32*> [#uses=5]
+  %1 = load i32* %0, align 4              ; <i32> [#uses=1]
+  %2 = and i32 %1, -8                     ; <i32> [#uses=2]
+  store i32 %2, i32* %0, align 4
+  %3 = and i32 %2, -57                    ; <i32> [#uses=1]
+  %4 = or i32 %3, 8                       ; <i32> [#uses=2]
+  store i32 %4, i32* %0, align 4
+  %5 = and i32 %4, -65                    ; <i32> [#uses=2]
+  store i32 %5, i32* %0, align 4
+  %6 = and i32 %5, -129                   ; <i32> [#uses=1]
+  store i32 %6, i32* %0, align 4
+  ret void
+}
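
With the transform in place, instcombine should be able to shrink this sequence so that only a narrow store of the low byte survives, leaving no 'and' instructions at all, which is what the RUN line's 'not grep and' checks.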