author     patrick <patrick@openbsd.org>  2017-01-14 19:55:43 +0000
committer  patrick <patrick@openbsd.org>  2017-01-14 19:55:43 +0000
commit     bd3306aecb3a15e8967143b8cdbbccf2b1b19b74 (patch)
tree       309a8132b44564b9e634c0da6815187ce8eab27c /gnu/llvm/lib/Analysis/Loads.cpp
parent     killp -a should not kill the window if only one pane. (diff)
Import LLVM 3.9.1 including clang and lld.
Diffstat (limited to 'gnu/llvm/lib/Analysis/Loads.cpp')
-rw-r--r--  gnu/llvm/lib/Analysis/Loads.cpp | 191
1 file changed, 162 insertions, 29 deletions
diff --git a/gnu/llvm/lib/Analysis/Loads.cpp b/gnu/llvm/lib/Analysis/Loads.cpp
index 4b2fa3c6505..75426b54195 100644
--- a/gnu/llvm/lib/Analysis/Loads.cpp
+++ b/gnu/llvm/lib/Analysis/Loads.cpp
@@ -21,8 +21,125 @@
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
+#include "llvm/IR/Statepoint.h"
+
using namespace llvm;
+static bool isAligned(const Value *Base, const APInt &Offset, unsigned Align,
+ const DataLayout &DL) {
+ APInt BaseAlign(Offset.getBitWidth(), Base->getPointerAlignment(DL));
+
+ if (!BaseAlign) {
+ Type *Ty = Base->getType()->getPointerElementType();
+ if (!Ty->isSized())
+ return false;
+ BaseAlign = DL.getABITypeAlignment(Ty);
+ }
+
+ APInt Alignment(Offset.getBitWidth(), Align);
+
+ assert(Alignment.isPowerOf2() && "must be a power of 2!");
+ return BaseAlign.uge(Alignment) && !(Offset & (Alignment-1));
+}
+
+static bool isAligned(const Value *Base, unsigned Align, const DataLayout &DL) {
+ Type *Ty = Base->getType();
+ assert(Ty->isSized() && "must be sized");
+ APInt Offset(DL.getTypeStoreSizeInBits(Ty), 0);
+ return isAligned(Base, Offset, Align, DL);
+}
+
+/// Test if V is always a pointer to allocated and suitably aligned memory for
+/// a simple load or store.
+static bool isDereferenceableAndAlignedPointer(
+ const Value *V, unsigned Align, const APInt &Size, const DataLayout &DL,
+ const Instruction *CtxI, const DominatorTree *DT,
+ SmallPtrSetImpl<const Value *> &Visited) {
+ // Note that it is not safe to speculate into a malloc'd region because
+ // malloc may return null.
+
+ // bitcast instructions are no-ops as far as dereferenceability is concerned.
+ if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V))
+ return isDereferenceableAndAlignedPointer(BC->getOperand(0), Align, Size,
+ DL, CtxI, DT, Visited);
+
+ bool CheckForNonNull = false;
+ APInt KnownDerefBytes(Size.getBitWidth(),
+ V->getPointerDereferenceableBytes(DL, CheckForNonNull));
+ if (KnownDerefBytes.getBoolValue()) {
+ if (KnownDerefBytes.uge(Size))
+ if (!CheckForNonNull || isKnownNonNullAt(V, CtxI, DT))
+ return isAligned(V, Align, DL);
+ }
+
+ // For GEPs, determine if the indexing lands within the allocated object.
+ if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
+ const Value *Base = GEP->getPointerOperand();
+
+ APInt Offset(DL.getPointerTypeSizeInBits(GEP->getType()), 0);
+ if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
+ !Offset.urem(APInt(Offset.getBitWidth(), Align)).isMinValue())
+ return false;
+
+ // If the base pointer is dereferenceable for Offset+Size bytes, then the
+ // GEP (== Base + Offset) is dereferenceable for Size bytes. If the base
+ // pointer is aligned to Align bytes, and the Offset is divisible by Align
+ // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
+ // aligned to Align bytes.
+
+ return Visited.insert(Base).second &&
+ isDereferenceableAndAlignedPointer(Base, Align, Offset + Size, DL,
+ CtxI, DT, Visited);
+ }
+
+ // For gc.relocate, look through relocations
+ if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
+ return isDereferenceableAndAlignedPointer(
+ RelocateInst->getDerivedPtr(), Align, Size, DL, CtxI, DT, Visited);
+
+ if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
+ return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, Size,
+ DL, CtxI, DT, Visited);
+
+ if (auto CS = ImmutableCallSite(V))
+ if (const Value *RV = CS.getReturnedArgOperand())
+ return isDereferenceableAndAlignedPointer(RV, Align, Size, DL, CtxI, DT,
+ Visited);
+
+ // If we don't know, assume the worst.
+ return false;
+}
+
+bool llvm::isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
+ const DataLayout &DL,
+ const Instruction *CtxI,
+ const DominatorTree *DT) {
+ // When dereferenceability information is provided by a dereferenceable
+ // attribute, we know exactly how many bytes are dereferenceable. If we can
+ // determine the exact offset to the attributed variable, we can use that
+ // information here.
+ Type *VTy = V->getType();
+ Type *Ty = VTy->getPointerElementType();
+
+ // Require ABI alignment for loads without alignment specification
+ if (Align == 0)
+ Align = DL.getABITypeAlignment(Ty);
+
+ if (!Ty->isSized())
+ return false;
+
+ SmallPtrSet<const Value *, 32> Visited;
+ return ::isDereferenceableAndAlignedPointer(
+ V, Align, APInt(DL.getTypeSizeInBits(VTy), DL.getTypeStoreSize(Ty)), DL,
+ CtxI, DT, Visited);
+}
+
+bool llvm::isDereferenceablePointer(const Value *V, const DataLayout &DL,
+ const Instruction *CtxI,
+ const DominatorTree *DT) {
+ return isDereferenceableAndAlignedPointer(V, 1, DL, CtxI, DT);
+}
+
/// \brief Test if A and B will obviously have the same value.
///
/// This includes recognizing that %t0 and %t1 will have the same
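
The first hunk's alignment test reduces to a power-of-two mask check, and its GEP case rests on simple interval arithmetic: if Base is dereferenceable for Offset+Size bytes, then Base+Offset is dereferenceable for Size bytes. A minimal standalone sketch of both checks (plain integers instead of APInt; the helper names are illustrative, not LLVM API):

    #include <cassert>
    #include <cstdint>
    #include <iostream>

    // Mirrors isAligned: the base's alignment must cover the requested
    // alignment, and the byte offset must be a multiple of it.
    static bool offsetAligned(uint64_t BaseAlign, uint64_t Offset,
                              uint64_t Align) {
      assert(Align && (Align & (Align - 1)) == 0 && "must be a power of 2!");
      return BaseAlign >= Align && (Offset & (Align - 1)) == 0;
    }

    // Mirrors the GEP case: Base + Offset is dereferenceable for Size bytes
    // whenever Base is dereferenceable for Offset + Size bytes.
    static bool gepDereferenceable(uint64_t KnownDerefBytes, uint64_t Offset,
                                   uint64_t Size) {
      return KnownDerefBytes >= Offset + Size;
    }

    int main() {
      std::cout << offsetAligned(8, 16, 8) << '\n';       // 1: 16 is 8-aligned
      std::cout << offsetAligned(8, 20, 8) << '\n';       // 0: 20 is not
      std::cout << gepDereferenceable(24, 16, 8) << '\n'; // 1: 16 + 8 <= 24
    }
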
@@ -56,21 +173,29 @@ static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
/// \brief Check if executing a load of this pointer value cannot trap.
///
+/// If DT and ScanFrom are specified this method performs context-sensitive
+/// analysis and returns true if it is safe to load immediately before ScanFrom.
+///
/// If it is not obviously safe to load from the specified pointer, we do
/// a quick local scan of the basic block containing \c ScanFrom, to determine
/// if the address is already accessed.
///
/// This uses the pointee type to determine how many bytes need to be safe to
/// load from the pointer.
-bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
- unsigned Align) {
- const DataLayout &DL = ScanFrom->getModule()->getDataLayout();
-
+bool llvm::isSafeToLoadUnconditionally(Value *V, unsigned Align,
+ const DataLayout &DL,
+ Instruction *ScanFrom,
+ const DominatorTree *DT) {
// Zero alignment means that the load has the ABI alignment for the target
if (Align == 0)
Align = DL.getABITypeAlignment(V->getType()->getPointerElementType());
assert(isPowerOf2_32(Align));
+ // If DT is not specified we can't make a context-sensitive query
+ const Instruction* CtxI = DT ? ScanFrom : nullptr;
+ if (isDereferenceableAndAlignedPointer(V, Align, DL, CtxI, DT))
+ return true;
+
int64_t ByteOffset = 0;
Value *Base = V;
Base = GetPointerBaseWithConstantOffset(V, ByteOffset, DL);
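
With the DataLayout and an optional DominatorTree now passed in explicitly, callers no longer derive DL from ScanFrom. A hypothetical call site under the new signature (LI, Ptr, and DT are stand-ins for values a pass would already have in scope):

    // Sketch only: speculate the load if it provably cannot trap.
    const DataLayout &DL = LI->getModule()->getDataLayout();
    if (isSafeToLoadUnconditionally(Ptr, LI->getAlignment(), DL,
                                    /*ScanFrom=*/LI, DT)) {
      // Safe to hoist the load above its controlling branch.
    }
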
@@ -86,9 +211,9 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
BaseAlign = AI->getAlignment();
} else if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
// Global variables are not necessarily safe to load from if they are
- // overridden. Their size may change or they may be weak and require a test
- // to determine if they were in fact provided.
- if (!GV->mayBeOverridden()) {
+ // interposed arbitrarily. Their size may change or they may be weak and
+ // require a test to determine if they were in fact provided.
+ if (!GV->isInterposable()) {
BaseType = GV->getType()->getElementType();
BaseAlign = GV->getAlignment();
}
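
For context on the renamed predicate: an interposable global is one whose definition may be replaced at link or load time, so its size and initializer as seen here cannot be trusted. A small illustration (Clang/GCC extension; not from the patch):

    // A weak definition is interposable: another object file may supply a
    // different, differently-sized, or absent definition, so a load from it
    // cannot be proven safe from this translation unit alone.
    __attribute__((weak)) int MaybeProvided = 0;

    int readIt() { return MaybeProvided; }
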
@@ -113,6 +238,9 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
}
}
+ if (!ScanFrom)
+ return false;
+
// Otherwise, be a little bit aggressive by scanning the local block where we
// want to check to see if the pointer is already being loaded or stored
// from/to. If so, the previous load or store would have already trapped,
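
The new early return means a null ScanFrom turns the function into a pure dereferenceability query with no block scan. A hypothetical context-free use:

    // No scanning: rely solely on the dereferenceability analysis.
    if (isSafeToLoadUnconditionally(Ptr, Align, DL,
                                    /*ScanFrom=*/nullptr, /*DT=*/nullptr)) {
      // The pointer is unconditionally loadable, independent of context.
    }
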
@@ -174,33 +302,24 @@ llvm::DefMaxInstsToScan("available-load-scan-limit", cl::init(6), cl::Hidden,
"to scan backward from a given instruction, when searching for "
"available loaded value"));
-/// \brief Scan the ScanBB block backwards to see if we have the value at the
-/// memory address *Ptr locally available within a small number of instructions.
-///
-/// The scan starts from \c ScanFrom. \c MaxInstsToScan specifies the maximum
-/// instructions to scan in the block. If it is set to \c 0, it will scan the whole
-/// block.
-///
-/// If the value is available, this function returns it. If not, it returns the
-/// iterator for the last validated instruction that the value would be live
-/// through. If we scanned the entire block and didn't find something that
-/// invalidates \c *Ptr or provides it, \c ScanFrom is left at the last
-/// instruction processed and this returns null.
-///
-/// You can also optionally specify an alias analysis implementation, which
-/// makes this more precise.
-///
-/// If \c AATags is non-null and a load or store is found, the AA tags from the
-/// load or store are recorded there. If there are no AA tags or if no access is
-/// found, it is left unmodified.
-Value *llvm::FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
+Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BasicBlock *ScanBB,
BasicBlock::iterator &ScanFrom,
unsigned MaxInstsToScan,
- AliasAnalysis *AA, AAMDNodes *AATags) {
+ AliasAnalysis *AA, AAMDNodes *AATags,
+ bool *IsLoadCSE) {
if (MaxInstsToScan == 0)
MaxInstsToScan = ~0U;
- Type *AccessTy = cast<PointerType>(Ptr->getType())->getElementType();
+ Value *Ptr = Load->getPointerOperand();
+ Type *AccessTy = Load->getType();
+
+ // We can never remove a volatile load
+ if (Load->isVolatile())
+ return nullptr;
+
+ // Anything stronger than unordered is currently unimplemented.
+ if (!Load->isUnordered())
+ return nullptr;
const DataLayout &DL = ScanBB->getModule()->getDataLayout();
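
FindAvailableLoadedValue now receives the LoadInst itself rather than a bare pointer, which is what lets it refuse volatile and ordered-atomic loads up front and report CSE through IsLoadCSE. A hypothetical caller after the change (Load and AA assumed in scope):

    // Sketch only: try to reuse an earlier load or a forwarded store value.
    BasicBlock::iterator ScanFrom = Load->getIterator();
    AAMDNodes AATags;
    bool IsLoadCSE = false;
    if (Value *Avail = FindAvailableLoadedValue(Load, Load->getParent(),
                                                ScanFrom, DefMaxInstsToScan,
                                                AA, &AATags, &IsLoadCSE)) {
      // Avail may replace the load; IsLoadCSE distinguishes load-CSE from
      // store-to-load forwarding.
    }
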
@@ -231,8 +350,16 @@ Value *llvm::FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
if (AreEquivalentAddressValues(
LI->getPointerOperand()->stripPointerCasts(), StrippedPtr) &&
CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {
+
+ // We can value forward from an atomic to a non-atomic, but not the
+ // other way around.
+ if (LI->isAtomic() < Load->isAtomic())
+ return nullptr;
+
if (AATags)
LI->getAAMetadata(*AATags);
+ if (IsLoadCSE)
+ *IsLoadCSE = true;
return LI;
}
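
The guard compares two bools, so the only rejected combination is a non-atomic available value feeding an atomic load; forwarding in the other direction merely drops ordering, which is sound. A tiny standalone check of that truth table (illustrative only):

    #include <initializer_list>
    #include <iostream>

    int main() {
      for (bool AvailAtomic : {false, true})
        for (bool QueryAtomic : {false, true})
          std::cout << "avail=" << AvailAtomic << " query=" << QueryAtomic
                    << " rejected=" << (AvailAtomic < QueryAtomic) << '\n';
      // Only avail=0, query=1 is rejected: an atomic load cannot take its
      // value from a non-atomic access.
    }
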
@@ -244,6 +371,12 @@ Value *llvm::FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
if (AreEquivalentAddressValues(StorePtr, StrippedPtr) &&
CastInst::isBitOrNoopPointerCastable(SI->getValueOperand()->getType(),
AccessTy, DL)) {
+
+ // We can value forward from an atomic to a non-atomic, but not the
+ // other way around.
+ if (SI->isAtomic() < Load->isAtomic())
+ return nullptr;
+
if (AATags)
SI->getAAMetadata(*AATags);
return SI->getOperand(0);