author     Evgeniy Brevnov <ybrevnov@azul.com>   2020-12-04 16:53:17 +0700
committer  Evgeniy Brevnov <ybrevnov@azul.com>   2020-12-08 16:53:37 +0700
commit     2d1b024d06b2a81f1c35193a998a864be09e2ae8 (patch)
tree       dbca66916e007e1515f50f375857d6a3047d45fa /llvm/lib/Transforms/Scalar
parent     80766ecc65096deeb4ff6f03562dcad94c54b862 (diff)
download   llvm-project-2d1b024d06b2a81f1c35193a998a864be09e2ae8.tar.gz
[DSE][NFC] Need to be careful when mixing signed and unsigned types

Currently, in some places we use a signed type to represent the size of an
access and insert explicit casts from unsigned to signed, for example:

    int64_t EarlierSize = int64_t(Loc.Size.getValue());

Even though this doesn't lose bits (immediately), it may overflow and we end
up with a negative size. That can potentially cause later code to work
incorrectly; a simple example is a check that the size is not negative. I
think it would be safer and clearer if we used an unsigned type for the size
and handled it appropriately.

Reviewed By: fhahn

Differential Revision: https://reviews.llvm.org/D92648
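As a minimal, self-contained sketch (not part of the patch; the value below is
made up), this shows how the old-style cast can turn a valid unsigned size
into a negative signed one:

    #include <cstdint>
    #include <iostream>

    int main() {
      // Hypothetical access size that does not fit in int64_t.
      uint64_t AccessSize = uint64_t(1) << 63;

      // Old style: the cast keeps the bits, but on common targets the value
      // wraps into the sign bit, so the "size" becomes negative.
      int64_t SignedSize = static_cast<int64_t>(AccessSize);

      // New style: keep the size unsigned; the value is preserved.
      uint64_t UnsignedSize = AccessSize;

      std::cout << "signed:   " << SignedSize << '\n'
                << "unsigned: " << UnsignedSize << '\n';
      // A later sanity check such as "SignedSize >= 0" would now reject a
      // valid access, while the unsigned form behaves as expected.
      return 0;
    }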
Diffstat (limited to 'llvm/lib/Transforms/Scalar')
-rw-r--r--  llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp | 40
1 file changed, 27 insertions(+), 13 deletions(-)
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 9499e892871f..e4b8980b01e8 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -1071,8 +1071,8 @@ static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,
}
static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierOffset,
- int64_t &EarlierSize, int64_t LaterOffset,
- int64_t LaterSize, bool IsOverwriteEnd) {
+ uint64_t &EarlierSize, int64_t LaterOffset,
+ uint64_t LaterSize, bool IsOverwriteEnd) {
// TODO: base this on the target vector size so that if the earlier
// store was too small to get vector writes anyway then its likely
// a good idea to shorten it
@@ -1127,16 +1127,23 @@ static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierOffset,
static bool tryToShortenEnd(Instruction *EarlierWrite,
OverlapIntervalsTy &IntervalMap,
- int64_t &EarlierStart, int64_t &EarlierSize) {
+ int64_t &EarlierStart, uint64_t &EarlierSize) {
if (IntervalMap.empty() || !isShortenableAtTheEnd(EarlierWrite))
return false;
OverlapIntervalsTy::iterator OII = --IntervalMap.end();
int64_t LaterStart = OII->second;
- int64_t LaterSize = OII->first - LaterStart;
+ uint64_t LaterSize = OII->first - LaterStart;
- if (LaterStart > EarlierStart && LaterStart < EarlierStart + EarlierSize &&
- LaterStart + LaterSize >= EarlierStart + EarlierSize) {
+ assert(OII->first - LaterStart >= 0 && "Size expected to be positive");
+
+ if (LaterStart > EarlierStart &&
+ // Note: "LaterStart - EarlierStart" is known to be positive due to
+ // preceding check.
+ (uint64_t)(LaterStart - EarlierStart) < EarlierSize &&
+ // Note: "EarlierSize - (uint64_t)(LaterStart - EarlierStart)" is known to
+ // be non negative due to preceding checks.
+ LaterSize >= EarlierSize - (uint64_t)(LaterStart - EarlierStart)) {
if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
LaterSize, true)) {
IntervalMap.erase(OII);
@@ -1148,16 +1155,23 @@ static bool tryToShortenEnd(Instruction *EarlierWrite,
static bool tryToShortenBegin(Instruction *EarlierWrite,
OverlapIntervalsTy &IntervalMap,
- int64_t &EarlierStart, int64_t &EarlierSize) {
+ int64_t &EarlierStart, uint64_t &EarlierSize) {
if (IntervalMap.empty() || !isShortenableAtTheBeginning(EarlierWrite))
return false;
OverlapIntervalsTy::iterator OII = IntervalMap.begin();
int64_t LaterStart = OII->second;
- int64_t LaterSize = OII->first - LaterStart;
+ uint64_t LaterSize = OII->first - LaterStart;
+
+ assert(OII->first - LaterStart >= 0 && "Size expected to be positive");
- if (LaterStart <= EarlierStart && LaterStart + LaterSize > EarlierStart) {
- assert(LaterStart + LaterSize < EarlierStart + EarlierSize &&
+ if (LaterStart <= EarlierStart &&
+ // Note: "EarlierStart - LaterStart" is known to be non negative due to
+ // preceding check.
+ LaterSize > (uint64_t)(EarlierStart - LaterStart)) {
+ // Note: "LaterSize - (uint64_t)(EarlierStart - LaterStart)" is known to be
+ // positive due to preceding checks.
+ assert(LaterSize - (uint64_t)(EarlierStart - LaterStart) < EarlierSize &&
"Should have been handled as OW_Complete");
if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
LaterSize, false)) {
@@ -1179,7 +1193,7 @@ static bool removePartiallyOverlappedStores(const DataLayout &DL,
const Value *Ptr = Loc.Ptr->stripPointerCasts();
int64_t EarlierStart = 0;
- int64_t EarlierSize = int64_t(Loc.Size.getValue());
+ uint64_t EarlierSize = Loc.Size.getValue();
GetPointerBaseWithConstantOffset(Ptr, EarlierStart, DL);
OverlapIntervalsTy &IntervalMap = OI.second;
Changed |=
@@ -1428,8 +1442,8 @@ static bool eliminateDeadStores(BasicBlock &BB, AliasAnalysis *AA,
"when partial-overwrite "
"tracking is enabled");
// The overwrite result is known, so these must be known, too.
- int64_t EarlierSize = DepLoc.Size.getValue();
- int64_t LaterSize = Loc.Size.getValue();
+ uint64_t EarlierSize = DepLoc.Size.getValue();
+ uint64_t LaterSize = Loc.Size.getValue();
bool IsOverwriteEnd = (OR == OW_End);
MadeChange |= tryToShorten(DepWrite, DepWriteOffset, EarlierSize,
InstWriteOffset, LaterSize, IsOverwriteEnd);
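For reference, a hedged standalone sketch of the rearranged overlap test in
tryToShortenEnd above (the function name laterCoversEarlierEnd and its
free-standing form are illustrative only, not LLVM API): offsets stay signed,
sizes stay unsigned, and the comparisons are reordered so that no mixed
signed/unsigned addition can overflow.

    #include <cstdint>

    static bool laterCoversEarlierEnd(int64_t EarlierStart, uint64_t EarlierSize,
                                      int64_t LaterStart, uint64_t LaterSize) {
      // Old form: LaterStart > EarlierStart &&
      //           LaterStart < EarlierStart + EarlierSize &&
      //           LaterStart + LaterSize >= EarlierStart + EarlierSize
      // The additions mix int64_t and uint64_t and may overflow, so the patch
      // compares differences instead.
      if (LaterStart <= EarlierStart)
        return false;
      // Positive because of the preceding check, so the cast is lossless.
      uint64_t Gap = uint64_t(LaterStart - EarlierStart);
      if (Gap >= EarlierSize)
        return false; // the later write starts at or past the earlier end
      // EarlierSize - Gap cannot underflow because Gap < EarlierSize.
      return LaterSize >= EarlierSize - Gap;
    }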