author    Guillaume Chatelet <gchatelet@google.com>  2019-10-21 15:10:26 +0000
committer Guillaume Chatelet <gchatelet@google.com>  2019-10-21 15:10:26 +0000
commit    ff6e4d503c7bb8ce6557fa2ff7afa839db58a85b (patch)
tree      36a0cb41cc73afb8025eb7760f09aadc9acbe3f5
parent    a3253c0261b44ba89ffa6e1adeaf227d085ec490 (diff)
download  llvm-ff6e4d503c7bb8ce6557fa2ff7afa839db58a85b.tar.gz
[Alignment][NFC] Finish transition for `Loads`
Summary:
This patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790

Reviewers: courbet

Subscribers: hiraditya, asbirlea, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D69253

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@375419 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r--  include/llvm/Analysis/Loads.h | 9
-rw-r--r--  lib/Analysis/Loads.cpp | 89
-rw-r--r--  lib/Analysis/MemDerefPrinter.cpp | 4
-rw-r--r--  lib/Analysis/ValueTracking.cpp | 6
-rw-r--r--  lib/CodeGen/MachineOperand.cpp | 3
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCalls.cpp | 6
-rw-r--r--  lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp | 16
-rw-r--r--  lib/Transforms/Scalar/LICM.cpp | 3
-rw-r--r--  lib/Transforms/Scalar/SROA.cpp | 12
-rw-r--r--  lib/Transforms/Scalar/TailRecursionElimination.cpp | 2
-rw-r--r--  lib/Transforms/Utils/SimplifyLibCalls.cpp | 3
11 files changed, 77 insertions, 76 deletions
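
For context on the mechanical change repeated in every hunk below: each `unsigned Align` parameter, where 0 meant "use the ABI alignment of the type", becomes a MaybeAlign (possibly unset) or Align (always a valid non-zero power of two), and the hand-written 0-check is replaced by DataLayout::getValueOrABITypeAlignment. A minimal sketch of the idiom, assuming the LLVM headers at this revision; resolveAlign is a hypothetical helper name used only for illustration, not part of the patch:

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Alignment.h"

using namespace llvm;

// Before the series: 0 was an in-band "unset" marker, resolved by hand.
//   unsigned resolveAlign(unsigned A, Type *Ty, const DataLayout &DL) {
//     return A ? A : DL.getABITypeAlignment(Ty);
//   }

// After: MaybeAlign carries the unset state in the type system, and Align
// is guaranteed non-zero and a power of two by construction.
static Align resolveAlign(MaybeAlign MA, Type *Ty, const DataLayout &DL) {
  // Returns *MA when set, otherwise the ABI alignment of Ty; this is the
  // DataLayout member the patch itself calls in Loads.cpp.
  return DL.getValueOrABITypeAlignment(MA, Ty);
}

Encoding the invariant in the type is what lets the patch delete the assert(Align != 0 && ...) and assert(isPowerOf2_32(Align)) checks from Loads.cpp: an Align value cannot represent those invalid states.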
diff --git a/include/llvm/Analysis/Loads.h b/include/llvm/Analysis/Loads.h
index b5884acf3b0..9604b2521e8 100644
--- a/include/llvm/Analysis/Loads.h
+++ b/include/llvm/Analysis/Loads.h
@@ -37,7 +37,8 @@ bool isDereferenceablePointer(const Value *V, Type *Ty,
/// performs context-sensitive analysis and returns true if the pointer is
/// dereferenceable at the specified instruction.
bool isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
- unsigned Align, const DataLayout &DL,
+ MaybeAlign Alignment,
+ const DataLayout &DL,
const Instruction *CtxI = nullptr,
const DominatorTree *DT = nullptr);
@@ -45,7 +46,7 @@ bool isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
/// greater or equal than requested. If the context instruction is specified
/// performs context-sensitive analysis and returns true if the pointer is
/// dereferenceable at the specified instruction.
-bool isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
+bool isDereferenceableAndAlignedPointer(const Value *V, Align Alignment,
const APInt &Size, const DataLayout &DL,
const Instruction *CtxI = nullptr,
const DominatorTree *DT = nullptr);
@@ -58,7 +59,7 @@ bool isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
/// If it is not obviously safe to load from the specified pointer, we do a
/// quick local scan of the basic block containing ScanFrom, to determine if
/// the address is already accessed.
-bool isSafeToLoadUnconditionally(Value *V, unsigned Align, APInt &Size,
+bool isSafeToLoadUnconditionally(Value *V, MaybeAlign Alignment, APInt &Size,
const DataLayout &DL,
Instruction *ScanFrom = nullptr,
const DominatorTree *DT = nullptr);
@@ -82,7 +83,7 @@ bool isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
/// If it is not obviously safe to load from the specified pointer, we do a
/// quick local scan of the basic block containing ScanFrom, to determine if
/// the address is already accessed.
-bool isSafeToLoadUnconditionally(Value *V, Type *Ty, unsigned Align,
+bool isSafeToLoadUnconditionally(Value *V, Type *Ty, MaybeAlign Alignment,
const DataLayout &DL,
Instruction *ScanFrom = nullptr,
const DominatorTree *DT = nullptr);
diff --git a/lib/Analysis/Loads.cpp b/lib/Analysis/Loads.cpp
index a4fd49920ad..641e92eac78 100644
--- a/lib/Analysis/Loads.cpp
+++ b/lib/Analysis/Loads.cpp
@@ -50,7 +50,7 @@ static bool isAligned(const Value *Base, const APInt &Offset, Align Alignment,
/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceableAndAlignedPointer(
- const Value *V, unsigned Align, const APInt &Size, const DataLayout &DL,
+ const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
const Instruction *CtxI, const DominatorTree *DT,
SmallPtrSetImpl<const Value *> &Visited) {
// Already visited? Bail out, we've likely hit unreachable code.
@@ -62,8 +62,8 @@ static bool isDereferenceableAndAlignedPointer(
// bitcast instructions are no-ops as far as dereferenceability is concerned.
if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V))
- return isDereferenceableAndAlignedPointer(BC->getOperand(0), Align, Size,
- DL, CtxI, DT, Visited);
+ return isDereferenceableAndAlignedPointer(BC->getOperand(0), Alignment,
+ Size, DL, CtxI, DT, Visited);
bool CheckForNonNull = false;
APInt KnownDerefBytes(Size.getBitWidth(),
@@ -76,7 +76,7 @@ static bool isDereferenceableAndAlignedPointer(
Type *Ty = V->getType();
assert(Ty->isSized() && "must be sized");
APInt Offset(DL.getTypeStoreSizeInBits(Ty), 0);
- return isAligned(V, Offset, llvm::Align(Align), DL);
+ return isAligned(V, Offset, Alignment, DL);
}
// For GEPs, determine if the indexing lands within the allocated object.
@@ -85,7 +85,8 @@ static bool isDereferenceableAndAlignedPointer(
APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
- !Offset.urem(APInt(Offset.getBitWidth(), Align)).isMinValue())
+ !Offset.urem(APInt(Offset.getBitWidth(), Alignment.value()))
+ .isMinValue())
return false;
// If the base pointer is dereferenceable for Offset+Size bytes, then the
@@ -97,72 +98,69 @@ static bool isDereferenceableAndAlignedPointer(
// Offset and Size may have different bit widths if we have visited an
// addrspacecast, so we can't do arithmetic directly on the APInt values.
return isDereferenceableAndAlignedPointer(
- Base, Align, Offset + Size.sextOrTrunc(Offset.getBitWidth()),
- DL, CtxI, DT, Visited);
+ Base, Alignment, Offset + Size.sextOrTrunc(Offset.getBitWidth()), DL,
+ CtxI, DT, Visited);
}
// For gc.relocate, look through relocations
if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
return isDereferenceableAndAlignedPointer(
- RelocateInst->getDerivedPtr(), Align, Size, DL, CtxI, DT, Visited);
+ RelocateInst->getDerivedPtr(), Alignment, Size, DL, CtxI, DT, Visited);
if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
- return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, Size,
- DL, CtxI, DT, Visited);
+ return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Alignment,
+ Size, DL, CtxI, DT, Visited);
if (const auto *Call = dyn_cast<CallBase>(V))
if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
- return isDereferenceableAndAlignedPointer(RP, Align, Size, DL, CtxI, DT,
- Visited);
+ return isDereferenceableAndAlignedPointer(RP, Alignment, Size, DL, CtxI,
+ DT, Visited);
// If we don't know, assume the worst.
return false;
}
-bool llvm::isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
+bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Align Alignment,
const APInt &Size,
const DataLayout &DL,
const Instruction *CtxI,
const DominatorTree *DT) {
- assert(Align != 0 && "expected explicitly set alignment");
// Note: At the moment, Size can be zero. This ends up being interpreted as
// a query of whether [Base, V] is dereferenceable and V is aligned (since
// that's what the implementation happened to do). It's unclear if this is
// the desired semantic, but at least SelectionDAG does exercise this case.
SmallPtrSet<const Value *, 32> Visited;
- return ::isDereferenceableAndAlignedPointer(V, Align, Size, DL, CtxI, DT,
+ return ::isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, DT,
Visited);
}
bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
- unsigned Align,
+ MaybeAlign MA,
const DataLayout &DL,
const Instruction *CtxI,
const DominatorTree *DT) {
+ if (!Ty->isSized())
+ return false;
+
// When dereferenceability information is provided by a dereferenceable
// attribute, we know exactly how many bytes are dereferenceable. If we can
// determine the exact offset to the attributed variable, we can use that
// information here.
// Require ABI alignment for loads without alignment specification
- if (Align == 0)
- Align = DL.getABITypeAlignment(Ty);
-
- if (!Ty->isSized())
- return false;
-
+ const Align Alignment = DL.getValueOrABITypeAlignment(MA, Ty);
APInt AccessSize(DL.getIndexTypeSizeInBits(V->getType()),
DL.getTypeStoreSize(Ty));
- return isDereferenceableAndAlignedPointer(V, Align, AccessSize,
- DL, CtxI, DT);
+ return isDereferenceableAndAlignedPointer(V, Alignment, AccessSize, DL, CtxI,
+ DT);
}
bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
const DataLayout &DL,
const Instruction *CtxI,
const DominatorTree *DT) {
- return isDereferenceableAndAlignedPointer(V, Ty, 1, DL, CtxI, DT);
+ return isDereferenceableAndAlignedPointer(V, Ty, Align::None(), DL, CtxI, DT);
}
/// Test if A and B will obviously have the same value.
@@ -204,17 +202,16 @@ bool llvm::isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
DL.getTypeStoreSize(LI->getType()));
- unsigned Align = LI->getAlignment();
- if (Align == 0)
- Align = DL.getABITypeAlignment(LI->getType());
+ const Align Alignment = DL.getValueOrABITypeAlignment(
+ MaybeAlign(LI->getAlignment()), LI->getType());
Instruction *HeaderFirstNonPHI = L->getHeader()->getFirstNonPHI();
// If given a uniform (i.e. non-varying) address, see if we can prove the
// access is safe within the loop w/o needing predication.
if (L->isLoopInvariant(Ptr))
- return isDereferenceableAndAlignedPointer(Ptr, Align, EltSize, DL,
- HeaderFirstNonPHI, &DT);
+ return isDereferenceableAndAlignedPointer(Ptr, Alignment, EltSize, DL,
+ HeaderFirstNonPHI, &DT);
// Otherwise, check to see if we have a repeating access pattern where we can
// prove that all accesses are well aligned and dereferenceable.
@@ -245,10 +242,10 @@ bool llvm::isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
// For the moment, restrict ourselves to the case where the access size is a
// multiple of the requested alignment and the base is aligned.
// TODO: generalize if a case found which warrants
- if (EltSize.urem(Align) != 0)
+ if (EltSize.urem(Alignment.value()) != 0)
return false;
- return isDereferenceableAndAlignedPointer(Base, Align, AccessSize,
- DL, HeaderFirstNonPHI, &DT);
+ return isDereferenceableAndAlignedPointer(Base, Alignment, AccessSize, DL,
+ HeaderFirstNonPHI, &DT);
}
/// Check if executing a load of this pointer value cannot trap.
@@ -262,18 +259,17 @@ bool llvm::isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
///
/// This uses the pointee type to determine how many bytes need to be safe to
/// load from the pointer.
-bool llvm::isSafeToLoadUnconditionally(Value *V, unsigned Align, APInt &Size,
+bool llvm::isSafeToLoadUnconditionally(Value *V, MaybeAlign MA, APInt &Size,
const DataLayout &DL,
Instruction *ScanFrom,
const DominatorTree *DT) {
// Zero alignment means that the load has the ABI alignment for the target
- if (Align == 0)
- Align = DL.getABITypeAlignment(V->getType()->getPointerElementType());
- assert(isPowerOf2_32(Align));
+ const Align Alignment =
+ DL.getValueOrABITypeAlignment(MA, V->getType()->getPointerElementType());
// If DT is not specified we can't make context-sensitive query
const Instruction* CtxI = DT ? ScanFrom : nullptr;
- if (isDereferenceableAndAlignedPointer(V, Align, Size, DL, CtxI, DT))
+ if (isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, DT))
return true;
if (!ScanFrom)
@@ -305,7 +301,7 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, unsigned Align, APInt &Size,
return false;
Value *AccessedPtr;
- unsigned AccessedAlign;
+ MaybeAlign MaybeAccessedAlign;
if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
// Ignore volatile loads. The execution of a volatile load cannot
// be used to prove an address is backed by regular memory; it can,
@@ -313,20 +309,21 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, unsigned Align, APInt &Size,
if (LI->isVolatile())
continue;
AccessedPtr = LI->getPointerOperand();
- AccessedAlign = LI->getAlignment();
+ MaybeAccessedAlign = MaybeAlign(LI->getAlignment());
} else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
// Ignore volatile stores (see comment for loads).
if (SI->isVolatile())
continue;
AccessedPtr = SI->getPointerOperand();
- AccessedAlign = SI->getAlignment();
+ MaybeAccessedAlign = MaybeAlign(SI->getAlignment());
} else
continue;
Type *AccessedTy = AccessedPtr->getType()->getPointerElementType();
- if (AccessedAlign == 0)
- AccessedAlign = DL.getABITypeAlignment(AccessedTy);
- if (AccessedAlign < Align)
+
+ const Align AccessedAlign =
+ DL.getValueOrABITypeAlignment(MaybeAccessedAlign, AccessedTy);
+ if (AccessedAlign < Alignment)
continue;
// Handle trivial cases.
@@ -341,12 +338,12 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, unsigned Align, APInt &Size,
return false;
}
-bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, unsigned Align,
+bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, MaybeAlign Alignment,
const DataLayout &DL,
Instruction *ScanFrom,
const DominatorTree *DT) {
APInt Size(DL.getIndexTypeSizeInBits(V->getType()), DL.getTypeStoreSize(Ty));
- return isSafeToLoadUnconditionally(V, Align, Size, DL, ScanFrom, DT);
+ return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, DT);
}
/// DefMaxInstsToScan - the default number of maximum instructions
diff --git a/lib/Analysis/MemDerefPrinter.cpp b/lib/Analysis/MemDerefPrinter.cpp
index 77ebf89d9a0..5cf516a538b 100644
--- a/lib/Analysis/MemDerefPrinter.cpp
+++ b/lib/Analysis/MemDerefPrinter.cpp
@@ -55,8 +55,8 @@ bool MemDerefPrinter::runOnFunction(Function &F) {
Value *PO = LI->getPointerOperand();
if (isDereferenceablePointer(PO, LI->getType(), DL))
Deref.push_back(PO);
- if (isDereferenceableAndAlignedPointer(PO, LI->getType(),
- LI->getAlignment(), DL))
+ if (isDereferenceableAndAlignedPointer(
+ PO, LI->getType(), MaybeAlign(LI->getAlignment()), DL))
DerefAndAligned.insert(PO);
}
}
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index c62ec353b83..bbf38999183 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -3938,9 +3938,9 @@ bool llvm::isSafeToSpeculativelyExecute(const Value *V,
if (mustSuppressSpeculation(*LI))
return false;
const DataLayout &DL = LI->getModule()->getDataLayout();
- return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
- LI->getType(), LI->getAlignment(),
- DL, CtxI, DT);
+ return isDereferenceableAndAlignedPointer(
+ LI->getPointerOperand(), LI->getType(), MaybeAlign(LI->getAlignment()),
+ DL, CtxI, DT);
}
case Instruction::Call: {
auto *CI = cast<const CallInst>(Inst);
diff --git a/lib/CodeGen/MachineOperand.cpp b/lib/CodeGen/MachineOperand.cpp
index 8eccfb85a94..8b19501ec3c 100644
--- a/lib/CodeGen/MachineOperand.cpp
+++ b/lib/CodeGen/MachineOperand.cpp
@@ -979,7 +979,8 @@ bool MachinePointerInfo::isDereferenceable(unsigned Size, LLVMContext &C,
return false;
return isDereferenceableAndAlignedPointer(
- BasePtr, 1, APInt(DL.getPointerSizeInBits(), Offset + Size), DL);
+ BasePtr, Align::None(), APInt(DL.getPointerSizeInBits(), Offset + Size),
+ DL);
}
/// getConstantPool - Return a MachinePointerInfo record that refers to the
diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 8d4b0dc0a7a..c15fb27a4c7 100644
--- a/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1059,9 +1059,9 @@ Value *InstCombiner::simplifyMaskedLoad(IntrinsicInst &II) {
// If we can unconditionally load from this address, replace with a
// load/select idiom. TODO: use DT for context sensitive query
- if (isDereferenceableAndAlignedPointer(LoadPtr, II.getType(), Alignment,
- II.getModule()->getDataLayout(),
- &II, nullptr)) {
+ if (isDereferenceableAndAlignedPointer(
+ LoadPtr, II.getType(), MaybeAlign(Alignment),
+ II.getModule()->getDataLayout(), &II, nullptr)) {
Value *LI = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
"unmaskedload");
return Builder.CreateSelect(II.getArgOperand(2), LI, II.getArgOperand(3));
diff --git a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index 4c5e1cc4376..3e035f43370 100644
--- a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -175,7 +175,7 @@ static bool isDereferenceableForAllocaSize(const Value *V, const AllocaInst *AI,
uint64_t AllocaSize = DL.getTypeStoreSize(AI->getAllocatedType());
if (!AllocaSize)
return false;
- return isDereferenceableAndAlignedPointer(V, AI->getAlignment(),
+ return isDereferenceableAndAlignedPointer(V, Align(AI->getAlignment()),
APInt(64, AllocaSize), DL);
}
@@ -1020,11 +1020,11 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
//
if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
// load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
- unsigned Align = LI.getAlignment();
- if (isSafeToLoadUnconditionally(SI->getOperand(1), LI.getType(), Align,
- DL, SI) &&
- isSafeToLoadUnconditionally(SI->getOperand(2), LI.getType(), Align,
- DL, SI)) {
+ const MaybeAlign Alignment(LI.getAlignment());
+ if (isSafeToLoadUnconditionally(SI->getOperand(1), LI.getType(),
+ Alignment, DL, SI) &&
+ isSafeToLoadUnconditionally(SI->getOperand(2), LI.getType(),
+ Alignment, DL, SI)) {
LoadInst *V1 =
Builder.CreateLoad(LI.getType(), SI->getOperand(1),
SI->getOperand(1)->getName() + ".val");
@@ -1032,9 +1032,9 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
Builder.CreateLoad(LI.getType(), SI->getOperand(2),
SI->getOperand(2)->getName() + ".val");
assert(LI.isUnordered() && "implied by above");
- V1->setAlignment(MaybeAlign(Align));
+ V1->setAlignment(Alignment);
V1->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
- V2->setAlignment(MaybeAlign(Align));
+ V2->setAlignment(Alignment);
V2->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
return SelectInst::Create(SI->getCondition(), V1, V2);
}
diff --git a/lib/Transforms/Scalar/LICM.cpp b/lib/Transforms/Scalar/LICM.cpp
index 262d64f1618..6ce4831a735 100644
--- a/lib/Transforms/Scalar/LICM.cpp
+++ b/lib/Transforms/Scalar/LICM.cpp
@@ -2033,7 +2033,8 @@ bool llvm::promoteLoopAccessesToScalars(
if (!DereferenceableInPH) {
DereferenceableInPH = isDereferenceableAndAlignedPointer(
Store->getPointerOperand(), Store->getValueOperand()->getType(),
- Store->getAlignment(), MDL, Preheader->getTerminator(), DT);
+ MaybeAlign(Store->getAlignment()), MDL,
+ Preheader->getTerminator(), DT);
}
} else
return false; // Not a load or store.
diff --git a/lib/Transforms/Scalar/SROA.cpp b/lib/Transforms/Scalar/SROA.cpp
index 4b816832c31..74b8ff91305 100644
--- a/lib/Transforms/Scalar/SROA.cpp
+++ b/lib/Transforms/Scalar/SROA.cpp
@@ -1199,7 +1199,7 @@ static bool isSafePHIToSpeculate(PHINode &PN) {
// TODO: Allow recursive phi users.
// TODO: Allow stores.
BasicBlock *BB = PN.getParent();
- unsigned MaxAlign = 0;
+ MaybeAlign MaxAlign;
uint64_t APWidth = DL.getIndexTypeSizeInBits(PN.getType());
APInt MaxSize(APWidth, 0);
bool HaveLoad = false;
@@ -1221,7 +1221,7 @@ static bool isSafePHIToSpeculate(PHINode &PN) {
return false;
uint64_t Size = DL.getTypeStoreSize(LI->getType());
- MaxAlign = std::max(MaxAlign, LI->getAlignment());
+ MaxAlign = std::max(MaxAlign, MaybeAlign(LI->getAlignment()));
MaxSize = MaxSize.ult(Size) ? APInt(APWidth, Size) : MaxSize;
HaveLoad = true;
}
@@ -1340,11 +1340,11 @@ static bool isSafeSelectToSpeculate(SelectInst &SI) {
// Both operands to the select need to be dereferenceable, either
// absolutely (e.g. allocas) or at this point because we can see other
// accesses to it.
- if (!isSafeToLoadUnconditionally(TValue, LI->getType(), LI->getAlignment(),
- DL, LI))
+ if (!isSafeToLoadUnconditionally(TValue, LI->getType(),
+ MaybeAlign(LI->getAlignment()), DL, LI))
return false;
- if (!isSafeToLoadUnconditionally(FValue, LI->getType(), LI->getAlignment(),
- DL, LI))
+ if (!isSafeToLoadUnconditionally(FValue, LI->getType(),
+ MaybeAlign(LI->getAlignment()), DL, LI))
return false;
}
diff --git a/lib/Transforms/Scalar/TailRecursionElimination.cpp b/lib/Transforms/Scalar/TailRecursionElimination.cpp
index f0b79079d81..b27a36b67d6 100644
--- a/lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ b/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -341,7 +341,7 @@ static bool canMoveAboveCall(Instruction *I, CallInst *CI, AliasAnalysis *AA) {
const DataLayout &DL = L->getModule()->getDataLayout();
if (isModSet(AA->getModRefInfo(CI, MemoryLocation::get(L))) ||
!isSafeToLoadUnconditionally(L->getPointerOperand(), L->getType(),
- L->getAlignment(), DL, L))
+ MaybeAlign(L->getAlignment()), DL, L))
return false;
}
}
diff --git a/lib/Transforms/Utils/SimplifyLibCalls.cpp b/lib/Transforms/Utils/SimplifyLibCalls.cpp
index 361b559ac02..0324993a820 100644
--- a/lib/Transforms/Utils/SimplifyLibCalls.cpp
+++ b/lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -177,7 +177,8 @@ static bool canTransformToMemCmp(CallInst *CI, Value *Str, uint64_t Len,
if (!isOnlyUsedInComparisonWithZero(CI))
return false;
- if (!isDereferenceableAndAlignedPointer(Str, 1, APInt(64, Len), DL))
+ if (!isDereferenceableAndAlignedPointer(Str, Align::None(), APInt(64, Len),
+ DL))
return false;
if (CI->getFunction()->hasFnAttribute(Attribute::SanitizeMemory))