// Source listing exported from the EIC LXR cross-referencer
// (navigation text: "Back to home page"; "EIC code displayed by LXR").
// File indexing completed on 2026-05-10 08:43:19.
0001 //===- llvm/Analysis/ValueTracking.h - Walk computations --------*- C++ -*-===//
0002 //
0003 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
0004 // See https://llvm.org/LICENSE.txt for license information.
0005 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
0006 //
0007 //===----------------------------------------------------------------------===//
0008 //
0009 // This file contains routines that help analyze properties that chains of
0010 // computations have.
0011 //
0012 //===----------------------------------------------------------------------===//
0013 
0014 #ifndef LLVM_ANALYSIS_VALUETRACKING_H
0015 #define LLVM_ANALYSIS_VALUETRACKING_H
0016 
0017 #include "llvm/Analysis/SimplifyQuery.h"
0018 #include "llvm/Analysis/WithCache.h"
0019 #include "llvm/IR/Constants.h"
0020 #include "llvm/IR/DataLayout.h"
0021 #include "llvm/IR/FMF.h"
0022 #include "llvm/IR/Instructions.h"
0023 #include "llvm/IR/InstrTypes.h"
0024 #include "llvm/IR/Intrinsics.h"
0025 #include <cassert>
0026 #include <cstdint>
0027 
0028 namespace llvm {
0029 
0030 class Operator;
0031 class AddOperator;
0032 class AssumptionCache;
0033 class DominatorTree;
0034 class GEPOperator;
0035 class WithOverflowInst;
0036 struct KnownBits;
0037 class Loop;
0038 class LoopInfo;
0039 class MDNode;
0040 class StringRef;
0041 class TargetLibraryInfo;
0042 template <typename T> class ArrayRef;
0043 
0044 constexpr unsigned MaxAnalysisRecursionDepth = 6;
0045 
0046 /// Determine which bits of V are known to be either zero or one and return
0047 /// them in the KnownZero/KnownOne bit sets.
0048 ///
0049 /// This function is defined on values with integer type, values with pointer
0050 /// type, and vectors of integers.  In the case
0051 /// where V is a vector, the known zero and known one values are the
0052 /// same width as the vector element, and the bit is set only if it is true
0053 /// for all of the elements in the vector.
0054 void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL,
0055                       unsigned Depth = 0, AssumptionCache *AC = nullptr,
0056                       const Instruction *CxtI = nullptr,
0057                       const DominatorTree *DT = nullptr,
0058                       bool UseInstrInfo = true);
0059 
0060 /// Returns the known bits rather than passing by reference.
0061 KnownBits computeKnownBits(const Value *V, const DataLayout &DL,
0062                            unsigned Depth = 0, AssumptionCache *AC = nullptr,
0063                            const Instruction *CxtI = nullptr,
0064                            const DominatorTree *DT = nullptr,
0065                            bool UseInstrInfo = true);
0066 
0067 /// Returns the known bits rather than passing by reference.
0068 KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
0069                            const DataLayout &DL, unsigned Depth = 0,
0070                            AssumptionCache *AC = nullptr,
0071                            const Instruction *CxtI = nullptr,
0072                            const DominatorTree *DT = nullptr,
0073                            bool UseInstrInfo = true);
0074 
0075 KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
0076                            unsigned Depth, const SimplifyQuery &Q);
0077 
0078 KnownBits computeKnownBits(const Value *V, unsigned Depth,
0079                            const SimplifyQuery &Q);
0080 
0081 void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
0082                       const SimplifyQuery &Q);
0083 
0084 /// Compute known bits from the range metadata.
0085 /// \p KnownZero the set of bits that are known to be zero
0086 /// \p KnownOne the set of bits that are known to be one
0087 void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known);
0088 
0089 /// Merge bits known from context-dependent facts into Known.
0090 void computeKnownBitsFromContext(const Value *V, KnownBits &Known,
0091                                  unsigned Depth, const SimplifyQuery &Q);
0092 
0093 /// Using KnownBits LHS/RHS produce the known bits for logic op (and/xor/or).
0094 KnownBits analyzeKnownBitsFromAndXorOr(const Operator *I,
0095                                        const KnownBits &KnownLHS,
0096                                        const KnownBits &KnownRHS,
0097                                        unsigned Depth, const SimplifyQuery &SQ);
0098 
0099 /// Adjust \p Known for the given select \p Arm to include information from the
0100 /// select \p Cond.
0101 void adjustKnownBitsForSelectArm(KnownBits &Known, Value *Cond, Value *Arm,
0102                                  bool Invert, unsigned Depth,
0103                                  const SimplifyQuery &Q);
0104 
0105 /// Return true if LHS and RHS have no common bits set.
0106 bool haveNoCommonBitsSet(const WithCache<const Value *> &LHSCache,
0107                          const WithCache<const Value *> &RHSCache,
0108                          const SimplifyQuery &SQ);
0109 
0110 /// Return true if the given value is known to have exactly one bit set when
0111 /// defined. For vectors return true if every element is known to be a power
0112 /// of two when defined. Supports values with integer or pointer type and
0113 /// vectors of integers. If 'OrZero' is set, then return true if the given
0114 /// value is either a power of two or zero.
0115 bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
0116                             bool OrZero = false, unsigned Depth = 0,
0117                             AssumptionCache *AC = nullptr,
0118                             const Instruction *CxtI = nullptr,
0119                             const DominatorTree *DT = nullptr,
0120                             bool UseInstrInfo = true);
0121 
0122 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
0123                             const SimplifyQuery &Q);
0124 
0125 bool isOnlyUsedInZeroComparison(const Instruction *CxtI);
0126 
0127 bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI);
0128 
0129 /// Return true if the given value is known to be non-zero when defined. For
0130 /// vectors, return true if every element is known to be non-zero when
0131 /// defined. For pointers, if the context instruction and dominator tree are
0132 /// specified, perform context-sensitive analysis and return true if the
0133 /// pointer couldn't possibly be null at the specified instruction.
0134 /// Supports values with integer or pointer type and vectors of integers.
0135 bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth = 0);
0136 
/// Return true if the two given values are negations of each other.
/// Currently can recognize value pairs:
/// 1: <X, Y> if X = sub (0, Y) or Y = sub (0, X)
/// 2: <X, Y> if X = sub (A, B) and Y = sub (B, A)
0141 bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW = false,
0142                      bool AllowPoison = true);
0143 
0144 /// Return true iff:
0145 /// 1. X is poison implies Y is poison.
0146 /// 2. X is true implies Y is false.
0147 /// 3. X is false implies Y is true.
0148 /// Otherwise, return false.
0149 bool isKnownInversion(const Value *X, const Value *Y);
0150 
/// Returns true if the given value is known to be non-negative.
0152 bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ,
0153                         unsigned Depth = 0);
0154 
/// Returns true if the given value is known to be positive (i.e. non-negative
/// and non-zero).
0157 bool isKnownPositive(const Value *V, const SimplifyQuery &SQ,
0158                      unsigned Depth = 0);
0159 
/// Returns true if the given value is known to be negative (i.e. non-positive
/// and non-zero).
0162 bool isKnownNegative(const Value *V, const SimplifyQuery &SQ,
0163                      unsigned Depth = 0);
0164 
0165 /// Return true if the given values are known to be non-equal when defined.
0166 /// Supports scalar integer types only.
0167 bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL,
0168                      AssumptionCache *AC = nullptr,
0169                      const Instruction *CxtI = nullptr,
0170                      const DominatorTree *DT = nullptr,
0171                      bool UseInstrInfo = true);
0172 
0173 /// Return true if 'V & Mask' is known to be zero. We use this predicate to
0174 /// simplify operations downstream. Mask is known to be zero for bits that V
0175 /// cannot have.
0176 ///
0177 /// This function is defined on values with integer type, values with pointer
0178 /// type, and vectors of integers.  In the case
0179 /// where V is a vector, the mask, known zero, and known one values are the
0180 /// same width as the vector element, and the bit is set only if it is true
0181 /// for all of the elements in the vector.
0182 bool MaskedValueIsZero(const Value *V, const APInt &Mask,
0183                        const SimplifyQuery &SQ, unsigned Depth = 0);
0184 
0185 /// Return the number of times the sign bit of the register is replicated into
0186 /// the other bits. We know that at least 1 bit is always equal to the sign
0187 /// bit (itself), but other cases can give us information. For example,
0188 /// immediately after an "ashr X, 2", we know that the top 3 bits are all
0189 /// equal to each other, so we return 3. For vectors, return the number of
/// sign bits for the vector element with the minimum number of known sign
0191 /// bits.
0192 unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL,
0193                             unsigned Depth = 0, AssumptionCache *AC = nullptr,
0194                             const Instruction *CxtI = nullptr,
0195                             const DominatorTree *DT = nullptr,
0196                             bool UseInstrInfo = true);
0197 
0198 /// Get the upper bound on bit size for this Value \p Op as a signed integer.
0199 /// i.e.  x == sext(trunc(x to MaxSignificantBits) to bitwidth(x)).
0200 /// Similar to the APInt::getSignificantBits function.
0201 unsigned ComputeMaxSignificantBits(const Value *Op, const DataLayout &DL,
0202                                    unsigned Depth = 0,
0203                                    AssumptionCache *AC = nullptr,
0204                                    const Instruction *CxtI = nullptr,
0205                                    const DominatorTree *DT = nullptr);
0206 
0207 /// Map a call instruction to an intrinsic ID.  Libcalls which have equivalent
0208 /// intrinsics are treated as-if they were intrinsics.
0209 Intrinsic::ID getIntrinsicForCallSite(const CallBase &CB,
0210                                       const TargetLibraryInfo *TLI);
0211 
0212 /// Given an exploded icmp instruction, return true if the comparison only
0213 /// checks the sign bit. If it only checks the sign bit, set TrueIfSigned if
0214 /// the result of the comparison is true when the input value is signed.
0215 bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS,
0216                     bool &TrueIfSigned);
0217 
0218 /// Returns a pair of values, which if passed to llvm.is.fpclass, returns the
0219 /// same result as an fcmp with the given operands.
0220 ///
0221 /// If \p LookThroughSrc is true, consider the input value when computing the
0222 /// mask.
0223 ///
/// If \p LookThroughSrc is false, ignore the source value (i.e. the first pair
/// element will always be LHS).
0226 std::pair<Value *, FPClassTest> fcmpToClassTest(CmpInst::Predicate Pred,
0227                                                 const Function &F, Value *LHS,
0228                                                 Value *RHS,
0229                                                 bool LookThroughSrc = true);
0230 std::pair<Value *, FPClassTest> fcmpToClassTest(CmpInst::Predicate Pred,
0231                                                 const Function &F, Value *LHS,
0232                                                 const APFloat *ConstRHS,
0233                                                 bool LookThroughSrc = true);
0234 
0235 /// Compute the possible floating-point classes that \p LHS could be based on
0236 /// fcmp \Pred \p LHS, \p RHS.
0237 ///
0238 /// \returns { TestedValue, ClassesIfTrue, ClassesIfFalse }
0239 ///
0240 /// If the compare returns an exact class test, ClassesIfTrue == ~ClassesIfFalse
0241 ///
0242 /// This is a less exact version of fcmpToClassTest (e.g. fcmpToClassTest will
0243 /// only succeed for a test of x > 0 implies positive, but not x > 1).
0244 ///
0245 /// If \p LookThroughSrc is true, consider the input value when computing the
0246 /// mask. This may look through sign bit operations.
0247 ///
/// If \p LookThroughSrc is false, ignore the source value (i.e. the first pair
/// element will always be LHS).
0250 ///
0251 std::tuple<Value *, FPClassTest, FPClassTest>
0252 fcmpImpliesClass(CmpInst::Predicate Pred, const Function &F, Value *LHS,
0253                  Value *RHS, bool LookThroughSrc = true);
0254 std::tuple<Value *, FPClassTest, FPClassTest>
0255 fcmpImpliesClass(CmpInst::Predicate Pred, const Function &F, Value *LHS,
0256                  FPClassTest RHS, bool LookThroughSrc = true);
0257 std::tuple<Value *, FPClassTest, FPClassTest>
0258 fcmpImpliesClass(CmpInst::Predicate Pred, const Function &F, Value *LHS,
0259                  const APFloat &RHS, bool LookThroughSrc = true);
0260 
/// Tracked facts about the possible floating-point classes of a value and,
/// when determinable, the state of its sign bit. This is the floating-point
/// analogue of KnownBits.
struct KnownFPClass {
  /// Floating-point classes the value could be one of.
  FPClassTest KnownFPClasses = fcAllFlags;

  /// std::nullopt if the sign bit is unknown, true if the sign bit is
  /// definitely set or false if the sign bit is definitely unset.
  std::optional<bool> SignBit;

  /// Two facts are equal iff both the class mask and the sign-bit state match.
  bool operator==(KnownFPClass Other) const {
    return KnownFPClasses == Other.KnownFPClasses && SignBit == Other.SignBit;
  }

  /// Return true if it's known this can never be one of the mask entries.
  bool isKnownNever(FPClassTest Mask) const {
    return (KnownFPClasses & Mask) == fcNone;
  }

  /// Return true if the value must be one of the classes in \p Mask
  /// (i.e. every class outside \p Mask is ruled out).
  bool isKnownAlways(FPClassTest Mask) const { return isKnownNever(~Mask); }

  /// Return true if nothing at all is known: any class is possible and the
  /// sign bit is undetermined.
  bool isUnknown() const {
    return KnownFPClasses == fcAllFlags && !SignBit;
  }

  /// Return true if it's known this can never be a nan.
  bool isKnownNeverNaN() const {
    return isKnownNever(fcNan);
  }

  /// Return true if it's known this must always be a nan.
  bool isKnownAlwaysNaN() const { return isKnownAlways(fcNan); }

  /// Return true if it's known this can never be an infinity.
  bool isKnownNeverInfinity() const {
    return isKnownNever(fcInf);
  }

  /// Return true if it's known this can never be +infinity.
  bool isKnownNeverPosInfinity() const {
    return isKnownNever(fcPosInf);
  }

  /// Return true if it's known this can never be -infinity.
  bool isKnownNeverNegInfinity() const {
    return isKnownNever(fcNegInf);
  }

  /// Return true if it's known this can never be a subnormal
  bool isKnownNeverSubnormal() const {
    return isKnownNever(fcSubnormal);
  }

  /// Return true if it's known this can never be a positive subnormal
  bool isKnownNeverPosSubnormal() const {
    return isKnownNever(fcPosSubnormal);
  }

  /// Return true if it's known this can never be a negative subnormal
  bool isKnownNeverNegSubnormal() const {
    return isKnownNever(fcNegSubnormal);
  }

  /// Return true if it's known this can never be a zero. This means a literal
  /// [+-]0, and does not include denormal inputs implicitly treated as [+-]0.
  bool isKnownNeverZero() const {
    return isKnownNever(fcZero);
  }

  /// Return true if it's known this can never be a literal positive zero.
  bool isKnownNeverPosZero() const {
    return isKnownNever(fcPosZero);
  }

  /// Return true if it's known this can never be a negative zero. This means a
  /// literal -0 and does not include denormal inputs implicitly treated as -0.
  bool isKnownNeverNegZero() const {
    return isKnownNever(fcNegZero);
  }

  /// Return true if it's known this can never be interpreted as a zero. This
  /// extends isKnownNeverZero to cover the case where the assumed
  /// floating-point mode for the function interprets denormals as zero.
  bool isKnownNeverLogicalZero(const Function &F, Type *Ty) const;

  /// Return true if it's known this can never be interpreted as a negative
  /// zero.
  bool isKnownNeverLogicalNegZero(const Function &F, Type *Ty) const;

  /// Return true if it's known this can never be interpreted as a positive
  /// zero.
  bool isKnownNeverLogicalPosZero(const Function &F, Type *Ty) const;

  // Classes that compare ordered-less-than (respectively greater-than) zero.
  // Note [+-]0 is excluded: +0 and -0 compare equal to 0.
  static constexpr FPClassTest OrderedLessThanZeroMask =
      fcNegSubnormal | fcNegNormal | fcNegInf;
  static constexpr FPClassTest OrderedGreaterThanZeroMask =
      fcPosSubnormal | fcPosNormal | fcPosInf;

  /// Return true if we can prove that the analyzed floating-point value is
  /// either NaN or never less than -0.0.
  ///
  ///      NaN --> true
  ///       +0 --> true
  ///       -0 --> true
  ///   x > +0 --> true
  ///   x < -0 --> false
  bool cannotBeOrderedLessThanZero() const {
    return isKnownNever(OrderedLessThanZeroMask);
  }

  /// Return true if we can prove that the analyzed floating-point value is
  /// either NaN or never greater than -0.0.
  ///      NaN --> true
  ///       +0 --> true
  ///       -0 --> true
  ///   x > +0 --> false
  ///   x < -0 --> true
  bool cannotBeOrderedGreaterThanZero() const {
    return isKnownNever(OrderedGreaterThanZeroMask);
  }

  /// Merge in facts from \p RHS: the value may be anything either side
  /// allows, and the sign bit survives only if both sides agree on it.
  KnownFPClass &operator|=(const KnownFPClass &RHS) {
    KnownFPClasses = KnownFPClasses | RHS.KnownFPClasses;

    if (SignBit != RHS.SignBit)
      SignBit = std::nullopt;
    return *this;
  }

  /// Rule out the classes in \p RuleOut. If, after the update, the value
  /// cannot be a NaN and one whole sign half is excluded, the sign bit
  /// becomes known as a side effect.
  void knownNot(FPClassTest RuleOut) {
    KnownFPClasses = KnownFPClasses & ~RuleOut;
    if (isKnownNever(fcNan) && !SignBit) {
      if (isKnownNever(fcNegative))
        SignBit = false;
      else if (isKnownNever(fcPositive))
        SignBit = true;
    }
  }

  /// Apply the effect of an fneg: classes swap sign halves and a known sign
  /// bit flips.
  void fneg() {
    KnownFPClasses = llvm::fneg(KnownFPClasses);
    if (SignBit)
      SignBit = !*SignBit;
  }

  /// Apply the effect of an fabs: every possible negative class maps onto its
  /// positive counterpart, then the sign bit is forced to zero.
  void fabs() {
    if (KnownFPClasses & fcNegZero)
      KnownFPClasses |= fcPosZero;

    if (KnownFPClasses & fcNegInf)
      KnownFPClasses |= fcPosInf;

    if (KnownFPClasses & fcNegSubnormal)
      KnownFPClasses |= fcPosSubnormal;

    if (KnownFPClasses & fcNegNormal)
      KnownFPClasses |= fcPosNormal;

    signBitMustBeZero();
  }

  /// Return true if the sign bit must be 0, ignoring the sign of nans.
  bool signBitIsZeroOrNaN() const {
    return isKnownNever(fcNegative);
  }

  /// Assume the sign bit is zero.
  void signBitMustBeZero() {
    KnownFPClasses &= (fcPositive | fcNan);
    SignBit = false;
  }

  /// Assume the sign bit is one.
  void signBitMustBeOne() {
    KnownFPClasses &= (fcNegative | fcNan);
    SignBit = true;
  }

  /// Apply the effect of copysign(this, Sign): magnitude classes are kept but
  /// the sign comes entirely from \p Sign.
  void copysign(const KnownFPClass &Sign) {
    // Don't know anything about the sign of the source. Expand the possible set
    // to its opposite sign pair. E.g. a known fcPosZero widens to fcZero
    // (either sign) since the sign now comes from \p Sign.
    if (KnownFPClasses & fcZero)
      KnownFPClasses |= fcZero;
    if (KnownFPClasses & fcSubnormal)
      KnownFPClasses |= fcSubnormal;
    if (KnownFPClasses & fcNormal)
      KnownFPClasses |= fcNormal;
    if (KnownFPClasses & fcInf)
      KnownFPClasses |= fcInf;

    // Sign bit is exactly preserved even for nans.
    SignBit = Sign.SignBit;

    // Clear sign bits based on the input sign mask.
    if (Sign.isKnownNever(fcPositive | fcNan) || (SignBit && *SignBit))
      KnownFPClasses &= (fcNegative | fcNan);
    if (Sign.isKnownNever(fcNegative | fcNan) || (SignBit && !*SignBit))
      KnownFPClasses &= (fcPositive | fcNan);
  }

  // Propagate knowledge that a non-NaN source implies the result can also not
  // be a NaN. For unconstrained operations, signaling nans are not guaranteed
  // to be quieted but cannot be introduced.
  void propagateNaN(const KnownFPClass &Src, bool PreserveSign = false) {
    if (Src.isKnownNever(fcNan)) {
      knownNot(fcNan);
      if (PreserveSign)
        SignBit = Src.SignBit;
    } else if (Src.isKnownNever(fcSNan))
      knownNot(fcSNan);
  }

  /// Propagate knowledge from a source value that could be a denormal or
  /// zero. We have to be conservative since output flushing is not guaranteed,
  /// so known-never-zero may not hold.
  ///
  /// This assumes a copy-like operation and will replace any currently known
  /// information.
  void propagateDenormal(const KnownFPClass &Src, const Function &F, Type *Ty);

  /// Report known classes if \p Src is evaluated through a potentially
  /// canonicalizing operation. We can assume signaling nans will not be
  /// introduced, but cannot assume a denormal will be flushed under FTZ/DAZ.
  ///
  /// This assumes a copy-like operation and will replace any currently known
  /// information.
  void propagateCanonicalizingSrc(const KnownFPClass &Src, const Function &F,
                                  Type *Ty);

  /// Reset to the default (fully unknown) state.
  void resetAll() { *this = KnownFPClass(); }
};
0488 
0489 inline KnownFPClass operator|(KnownFPClass LHS, const KnownFPClass &RHS) {
0490   LHS |= RHS;
0491   return LHS;
0492 }
0493 
0494 inline KnownFPClass operator|(const KnownFPClass &LHS, KnownFPClass &&RHS) {
0495   RHS |= LHS;
0496   return std::move(RHS);
0497 }
0498 
0499 /// Determine which floating-point classes are valid for \p V, and return them
0500 /// in KnownFPClass bit sets.
0501 ///
0502 /// This function is defined on values with floating-point type, values vectors
0503 /// of floating-point type, and arrays of floating-point type.
0504 
0505 /// \p InterestedClasses is a compile time optimization hint for which floating
0506 /// point classes should be queried. Queries not specified in \p
0507 /// InterestedClasses should be reliable if they are determined during the
0508 /// query.
0509 KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts,
0510                                  FPClassTest InterestedClasses, unsigned Depth,
0511                                  const SimplifyQuery &SQ);
0512 
0513 KnownFPClass computeKnownFPClass(const Value *V, FPClassTest InterestedClasses,
0514                                  unsigned Depth, const SimplifyQuery &SQ);
0515 
0516 inline KnownFPClass computeKnownFPClass(
0517     const Value *V, const DataLayout &DL,
0518     FPClassTest InterestedClasses = fcAllFlags, unsigned Depth = 0,
0519     const TargetLibraryInfo *TLI = nullptr, AssumptionCache *AC = nullptr,
0520     const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr,
0521     bool UseInstrInfo = true) {
0522   return computeKnownFPClass(
0523       V, InterestedClasses, Depth,
0524       SimplifyQuery(DL, TLI, DT, AC, CxtI, UseInstrInfo));
0525 }
0526 
0527 /// Wrapper to account for known fast math flags at the use instruction.
0528 inline KnownFPClass
0529 computeKnownFPClass(const Value *V, const APInt &DemandedElts,
0530                     FastMathFlags FMF, FPClassTest InterestedClasses,
0531                     unsigned Depth, const SimplifyQuery &SQ) {
0532   if (FMF.noNaNs())
0533     InterestedClasses &= ~fcNan;
0534   if (FMF.noInfs())
0535     InterestedClasses &= ~fcInf;
0536 
0537   KnownFPClass Result =
0538       computeKnownFPClass(V, DemandedElts, InterestedClasses, Depth, SQ);
0539 
0540   if (FMF.noNaNs())
0541     Result.KnownFPClasses &= ~fcNan;
0542   if (FMF.noInfs())
0543     Result.KnownFPClasses &= ~fcInf;
0544   return Result;
0545 }
0546 
0547 inline KnownFPClass computeKnownFPClass(const Value *V, FastMathFlags FMF,
0548                                         FPClassTest InterestedClasses,
0549                                         unsigned Depth,
0550                                         const SimplifyQuery &SQ) {
0551   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
0552   APInt DemandedElts =
0553       FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
0554   return computeKnownFPClass(V, DemandedElts, FMF, InterestedClasses, Depth,
0555                              SQ);
0556 }
0557 
0558 /// Return true if we can prove that the specified FP value is never equal to
0559 /// -0.0. Users should use caution when considering PreserveSign
0560 /// denormal-fp-math.
0561 inline bool cannotBeNegativeZero(const Value *V, unsigned Depth,
0562                                  const SimplifyQuery &SQ) {
0563   KnownFPClass Known = computeKnownFPClass(V, fcNegZero, Depth, SQ);
0564   return Known.isKnownNeverNegZero();
0565 }
0566 
0567 /// Return true if we can prove that the specified FP value is either NaN or
0568 /// never less than -0.0.
0569 ///
0570 ///      NaN --> true
0571 ///       +0 --> true
0572 ///       -0 --> true
0573 ///   x > +0 --> true
0574 ///   x < -0 --> false
0575 inline bool cannotBeOrderedLessThanZero(const Value *V, unsigned Depth,
0576                                         const SimplifyQuery &SQ) {
0577   KnownFPClass Known =
0578       computeKnownFPClass(V, KnownFPClass::OrderedLessThanZeroMask, Depth, SQ);
0579   return Known.cannotBeOrderedLessThanZero();
0580 }
0581 
0582 /// Return true if the floating-point scalar value is not an infinity or if
0583 /// the floating-point vector value has no infinities. Return false if a value
0584 /// could ever be infinity.
0585 inline bool isKnownNeverInfinity(const Value *V, unsigned Depth,
0586                                  const SimplifyQuery &SQ) {
0587   KnownFPClass Known = computeKnownFPClass(V, fcInf, Depth, SQ);
0588   return Known.isKnownNeverInfinity();
0589 }
0590 
0591 /// Return true if the floating-point value can never contain a NaN or infinity.
0592 inline bool isKnownNeverInfOrNaN(const Value *V, unsigned Depth,
0593                                  const SimplifyQuery &SQ) {
0594   KnownFPClass Known = computeKnownFPClass(V, fcInf | fcNan, Depth, SQ);
0595   return Known.isKnownNeverNaN() && Known.isKnownNeverInfinity();
0596 }
0597 
0598 /// Return true if the floating-point scalar value is not a NaN or if the
0599 /// floating-point vector value has no NaN elements. Return false if a value
0600 /// could ever be NaN.
0601 inline bool isKnownNeverNaN(const Value *V, unsigned Depth,
0602                             const SimplifyQuery &SQ) {
0603   KnownFPClass Known = computeKnownFPClass(V, fcNan, Depth, SQ);
0604   return Known.isKnownNeverNaN();
0605 }
0606 
0607 /// Return false if we can prove that the specified FP value's sign bit is 0.
0608 /// Return true if we can prove that the specified FP value's sign bit is 1.
0609 /// Otherwise return std::nullopt.
0610 inline std::optional<bool> computeKnownFPSignBit(const Value *V, unsigned Depth,
0611                                                  const SimplifyQuery &SQ) {
0612   KnownFPClass Known = computeKnownFPClass(V, fcAllFlags, Depth, SQ);
0613   return Known.SignBit;
0614 }
0615 
0616 /// If the specified value can be set by repeating the same byte in memory,
0617 /// return the i8 value that it is represented with. This is true for all i8
0618 /// values obviously, but is also true for i32 0, i32 -1, i16 0xF0F0, double
0619 /// 0.0 etc. If the value can't be handled with a repeated byte store (e.g.
0620 /// i16 0x1234), return null. If the value is entirely undef and padding,
0621 /// return undef.
0622 Value *isBytewiseValue(Value *V, const DataLayout &DL);
0623 
/// Given an aggregate and a sequence of indices, see if the scalar value
0625 /// indexed is already around as a register, for example if it were inserted
0626 /// directly into the aggregate.
0627 ///
0628 /// If InsertBefore is not empty, this function will duplicate (modified)
0629 /// insertvalues when a part of a nested struct is extracted.
0630 Value *FindInsertedValue(
0631     Value *V, ArrayRef<unsigned> idx_range,
0632     std::optional<BasicBlock::iterator> InsertBefore = std::nullopt);
0633 
0634 /// Analyze the specified pointer to see if it can be expressed as a base
0635 /// pointer plus a constant offset. Return the base and offset to the caller.
0636 ///
0637 /// This is a wrapper around Value::stripAndAccumulateConstantOffsets that
0638 /// creates and later unpacks the required APInt.
0639 inline Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
0640                                                const DataLayout &DL,
0641                                                bool AllowNonInbounds = true) {
0642   APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
0643   Value *Base =
0644       Ptr->stripAndAccumulateConstantOffsets(DL, OffsetAPInt, AllowNonInbounds);
0645 
0646   Offset = OffsetAPInt.getSExtValue();
0647   return Base;
0648 }
0649 inline const Value *
0650 GetPointerBaseWithConstantOffset(const Value *Ptr, int64_t &Offset,
0651                                  const DataLayout &DL,
0652                                  bool AllowNonInbounds = true) {
0653   return GetPointerBaseWithConstantOffset(const_cast<Value *>(Ptr), Offset, DL,
0654                                           AllowNonInbounds);
0655 }
0656 
/// Returns true if the GEP is based on a pointer to a string (array of
/// \p CharSize integers) and is indexing into this string.
0659 bool isGEPBasedOnPointerToString(const GEPOperator *GEP, unsigned CharSize = 8);
0660 
0661 /// Represents offset+length into a ConstantDataArray.
0662 struct ConstantDataArraySlice {
0663   /// ConstantDataArray pointer. nullptr indicates a zeroinitializer (a valid
0664   /// initializer, it just doesn't fit the ConstantDataArray interface).
0665   const ConstantDataArray *Array;
0666 
0667   /// Slice starts at this Offset.
0668   uint64_t Offset;
0669 
0670   /// Length of the slice.
0671   uint64_t Length;
0672 
0673   /// Moves the Offset and adjusts Length accordingly.
0674   void move(uint64_t Delta) {
0675     assert(Delta < Length);
0676     Offset += Delta;
0677     Length -= Delta;
0678   }
0679 
0680   /// Convenience accessor for elements in the slice.
0681   uint64_t operator[](unsigned I) const {
0682     return Array == nullptr ? 0 : Array->getElementAsInteger(I + Offset);
0683   }
0684 };
0685 
/// Returns true if the value \p V is a pointer into a ConstantDataArray.
/// If successful \p Slice will point to a ConstantDataArray info object
/// with an appropriate offset.
bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice,
                              unsigned ElementSize, uint64_t Offset = 0);

/// This function computes the length of a null-terminated C string pointed to
/// by V. If successful, it returns true and returns the string in Str. If
/// unsuccessful, it returns false. This does not include the trailing null
/// character by default. If TrimAtNul is set to false, then this returns any
/// trailing null characters as well as any other characters that come after
/// it.
bool getConstantStringInfo(const Value *V, StringRef &Str,
                           bool TrimAtNul = true);

/// If we can compute the length of the string pointed to by the specified
/// pointer, return 'len+1'.  If we can't, return 0.
uint64_t GetStringLength(const Value *V, unsigned CharSize = 8);

/// This function returns the call's pointer argument that is considered the
/// same by aliasing rules. You CAN'T use it to replace one value with another.
/// If \p MustPreserveNullness is true, the call must preserve the nullness of
/// the pointer.
const Value *getArgumentAliasingToReturnedPointer(const CallBase *Call,
                                                  bool MustPreserveNullness);
0711 inline Value *getArgumentAliasingToReturnedPointer(CallBase *Call,
0712                                                    bool MustPreserveNullness) {
0713   return const_cast<Value *>(getArgumentAliasingToReturnedPointer(
0714       const_cast<const CallBase *>(Call), MustPreserveNullness));
0715 }
0716 
/// {launder,strip}.invariant.group returns a pointer that aliases its
/// argument, and it only captures the pointer by returning it.
/// These intrinsics are not marked as nocapture, because returning is
/// considered as capture. The arguments are not marked as returned either,
/// because it would make them useless. If \p MustPreserveNullness is true,
/// the intrinsic must preserve the nullness of the pointer.
bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
    const CallBase *Call, bool MustPreserveNullness);

/// This method strips off any GEP address adjustments, pointer casts
/// or `llvm.threadlocal.address` from the specified value \p V, returning the
/// original object being addressed. Note that the returned value has pointer
/// type if the specified value does. If the \p MaxLookup value is non-zero, it
/// limits the number of instructions to be stripped off.
const Value *getUnderlyingObject(const Value *V, unsigned MaxLookup = 6);
0732 inline Value *getUnderlyingObject(Value *V, unsigned MaxLookup = 6) {
0733   // Force const to avoid infinite recursion.
0734   const Value *VConst = V;
0735   return const_cast<Value *>(getUnderlyingObject(VConst, MaxLookup));
0736 }
0737 
/// Like getUnderlyingObject(), but will try harder to find a single underlying
/// object. In particular, this function also looks through selects and phis.
const Value *getUnderlyingObjectAggressive(const Value *V);

/// This method is similar to getUnderlyingObject except that it can
/// look through phi and select instructions and return multiple objects.
///
/// If LoopInfo is passed, loop phis are further analyzed.  If a pointer
/// accesses different objects in each iteration, we don't look through the
/// phi node. E.g. consider this loop nest:
///
///   int **A;
///   for (i)
///     for (j) {
///        A[i][j] = A[i-1][j] * B[j]
///     }
///
/// This is transformed by Load-PRE to stash away A[i] for the next iteration
/// of the outer loop:
///
///   Curr = A[0];          // Prev_0
///   for (i: 1..N) {
///     Prev = Curr;        // Prev = PHI (Prev_0, Curr)
///     Curr = A[i];
///     for (j: 0..N) {
///        Curr[j] = Prev[j] * B[j]
///     }
///   }
///
/// Since A[i] and A[i-1] are independent pointers, getUnderlyingObjects
/// should not assume that Curr and Prev share the same underlying object thus
/// it shouldn't look through the phi above.
void getUnderlyingObjects(const Value *V,
                          SmallVectorImpl<const Value *> &Objects,
                          const LoopInfo *LI = nullptr, unsigned MaxLookup = 6);

/// This is a wrapper around getUnderlyingObjects and adds support for basic
/// ptrtoint+arithmetic+inttoptr sequences.
bool getUnderlyingObjectsForCodeGen(const Value *V,
                                    SmallVectorImpl<Value *> &Objects);

/// Returns the unique alloca where the value comes from, or nullptr.
/// If OffsetZero is true check that V points to the beginning of the alloca.
AllocaInst *findAllocaForValue(Value *V, bool OffsetZero = false);
0782 inline const AllocaInst *findAllocaForValue(const Value *V,
0783                                             bool OffsetZero = false) {
0784   return findAllocaForValue(const_cast<Value *>(V), OffsetZero);
0785 }
0786 
/// Return true if the only users of this pointer are lifetime markers.
bool onlyUsedByLifetimeMarkers(const Value *V);

/// Return true if the only users of this pointer are lifetime markers or
/// droppable instructions.
bool onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V);

/// Return true if the instruction doesn't potentially cross vector lanes. This
/// condition is weaker than checking that the instruction is lanewise: lanewise
/// means that the same operation is splatted across all lanes, but we also
/// include the case where there is a different operation on each lane, as long
/// as the operation only uses data from that lane. An example of an operation
/// that is not lanewise, but doesn't cross vector lanes is insertelement.
bool isNotCrossLaneOperation(const Instruction *I);

/// Return true if the instruction does not have any effects besides
/// calculating the result and does not have undefined behavior.
///
/// This method never returns true for an instruction that returns true for
/// mayHaveSideEffects; however, this method also does some other checks in
/// addition. It checks for undefined behavior, like dividing by zero or
/// loading from an invalid pointer (but not for undefined results, like a
/// shift with a shift amount larger than the width of the result). It checks
/// for malloc and alloca because speculatively executing them might cause a
/// memory leak. It also returns false for instructions related to control
/// flow, specifically terminators and PHI nodes.
///
/// If the CtxI is specified this method performs context-sensitive analysis
/// and returns true if it is safe to execute the instruction immediately
/// before the CtxI. If the instruction has (transitive) operands that don't
/// dominate CtxI, the analysis is performed under the assumption that these
/// operands will also be speculated to a point before CtxI.
///
/// If the CtxI is NOT specified this method only looks at the instruction
/// itself and its operands, so if this method returns true, it is safe to
/// move the instruction as long as the correct dominance relationships for
/// the operands and users hold.
///
/// This method can return true for instructions that read memory;
/// for such instructions, moving them may change the resulting value.
bool isSafeToSpeculativelyExecute(const Instruction *I,
                                  const Instruction *CtxI = nullptr,
                                  AssumptionCache *AC = nullptr,
                                  const DominatorTree *DT = nullptr,
                                  const TargetLibraryInfo *TLI = nullptr,
                                  bool UseVariableInfo = true);
0833 
0834 inline bool isSafeToSpeculativelyExecute(const Instruction *I,
0835                                          BasicBlock::iterator CtxI,
0836                                          AssumptionCache *AC = nullptr,
0837                                          const DominatorTree *DT = nullptr,
0838                                          const TargetLibraryInfo *TLI = nullptr,
0839                                          bool UseVariableInfo = true) {
0840   // Take an iterator, and unwrap it into an Instruction *.
0841   return isSafeToSpeculativelyExecute(I, &*CtxI, AC, DT, TLI, UseVariableInfo);
0842 }
0843 
0844 /// Don't use information from its non-constant operands. This helper is used
0845 /// when its operands are going to be replaced.
0846 inline bool
0847 isSafeToSpeculativelyExecuteWithVariableReplaced(const Instruction *I) {
0848   return isSafeToSpeculativelyExecute(I, nullptr, nullptr, nullptr, nullptr,
0849                                       /*UseVariableInfo=*/false);
0850 }
0851 
/// This returns the same result as isSafeToSpeculativelyExecute if Opcode is
/// the actual opcode of Inst. If the provided and actual opcode differ, the
/// function (virtually) overrides the opcode of Inst with the provided
/// Opcode. There are some constraints in this case:
/// * If Opcode has a fixed number of operands (eg, as binary operators do),
///   then Inst has to have at least as many leading operands. The function
///   will ignore all trailing operands beyond that number.
/// * If Opcode allows for an arbitrary number of operands (eg, as CallInsts
///   do), then all operands are considered.
/// * The virtual instruction has to satisfy all typing rules of the provided
///   Opcode.
/// * This function is pessimistic in the following sense: If one actually
///   materialized the virtual instruction, then isSafeToSpeculativelyExecute
///   may say that the materialized instruction is speculatable whereas this
///   function may have said that the instruction wouldn't be speculatable.
///   This behavior is a shortcoming in the current implementation and not
///   intentional.
bool isSafeToSpeculativelyExecuteWithOpcode(
    unsigned Opcode, const Instruction *Inst, const Instruction *CtxI = nullptr,
    AssumptionCache *AC = nullptr, const DominatorTree *DT = nullptr,
    const TargetLibraryInfo *TLI = nullptr, bool UseVariableInfo = true);

/// Returns true if the result or effects of the given instruction \p I
/// depend on values not reachable through the def use graph.
/// * Memory dependence arises for example if the instruction reads from
///   memory or may produce effects or undefined behaviour. Memory dependent
///   instructions generally cannot be reordered with respect to other memory
///   dependent instructions.
/// * Control dependence arises for example if the instruction may fault
///   if lifted above a throwing call or infinite loop.
bool mayHaveNonDefUseDependency(const Instruction &I);

/// Return true if it is an intrinsic that cannot be speculated but also
/// cannot trap.
bool isAssumeLikeIntrinsic(const Instruction *I);

/// Return true if it is valid to use the assumptions provided by an
/// assume intrinsic, I, at the point in the control-flow identified by the
/// context instruction, CxtI. By default, ephemeral values of the assumption
/// are treated as an invalid context, to prevent the assumption from being used
/// to optimize away its argument. If the caller can ensure that this won't
/// happen, it can call with AllowEphemerals set to true to get more valid
/// assumptions.
bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI,
                             const DominatorTree *DT = nullptr,
                             bool AllowEphemerals = false);

enum class OverflowResult {
  /// Always overflows in the direction of signed/unsigned min value.
  AlwaysOverflowsLow,
  /// Always overflows in the direction of signed/unsigned max value.
  AlwaysOverflowsHigh,
  /// May or may not overflow.
  MayOverflow,
  /// Never overflows.
  NeverOverflows,
};

/// Determine whether the unsigned multiplication of \p LHS and \p RHS can
/// overflow.
OverflowResult computeOverflowForUnsignedMul(const Value *LHS, const Value *RHS,
                                             const SimplifyQuery &SQ,
                                             bool IsNSW = false);
/// Determine whether the signed multiplication of \p LHS and \p RHS can
/// overflow.
OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
                                           const SimplifyQuery &SQ);
/// Determine whether the unsigned addition of \p LHS and \p RHS can overflow.
OverflowResult
computeOverflowForUnsignedAdd(const WithCache<const Value *> &LHS,
                              const WithCache<const Value *> &RHS,
                              const SimplifyQuery &SQ);
/// Determine whether the signed addition of \p LHS and \p RHS can overflow.
OverflowResult computeOverflowForSignedAdd(const WithCache<const Value *> &LHS,
                                           const WithCache<const Value *> &RHS,
                                           const SimplifyQuery &SQ);
/// This version also leverages the sign bit of Add if known.
OverflowResult computeOverflowForSignedAdd(const AddOperator *Add,
                                           const SimplifyQuery &SQ);
/// Determine whether the unsigned subtraction of \p LHS and \p RHS can
/// overflow.
OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS,
                                             const SimplifyQuery &SQ);
/// Determine whether the signed subtraction of \p LHS and \p RHS can
/// overflow.
OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS,
                                           const SimplifyQuery &SQ);
0929 
/// Returns true if the arithmetic part of the \p WO 's result is
/// used only along the paths control dependent on the computation
/// not overflowing, \p WO being an <op>.with.overflow intrinsic.
bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
                               const DominatorTree &DT);

/// Determine the possible constant range of vscale with the given bit width,
/// based on the vscale_range function attribute.
ConstantRange getVScaleRange(const Function *F, unsigned BitWidth);

/// Determine the possible constant range of an integer or vector of integer
/// value. This is intended as a cheap, non-recursive check.
ConstantRange computeConstantRange(const Value *V, bool ForSigned,
                                   bool UseInstrInfo = true,
                                   AssumptionCache *AC = nullptr,
                                   const Instruction *CtxI = nullptr,
                                   const DominatorTree *DT = nullptr,
                                   unsigned Depth = 0);

/// Combine constant ranges from computeConstantRange() and computeKnownBits().
ConstantRange
computeConstantRangeIncludingKnownBits(const WithCache<const Value *> &V,
                                       bool ForSigned, const SimplifyQuery &SQ);

/// Return true if this function can prove that the instruction I will
/// always transfer execution to one of its successors (including the next
/// instruction that follows within a basic block). E.g. this is not
/// guaranteed for function calls that could loop infinitely.
///
/// In other words, this function returns false for instructions that may
/// transfer execution or fail to transfer execution in a way that is not
/// captured in the CFG nor in the sequence of instructions within a basic
/// block.
///
/// Undefined behavior is assumed not to happen, so e.g. division is
/// guaranteed to transfer execution to the following instruction even
/// though division by zero might cause undefined behavior.
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I);

/// Returns true if this block does not contain a potential implicit exit.
/// This is equivalent to saying that all instructions within the basic block
/// are guaranteed to transfer execution to their successor within the basic
/// block. This has the same assumptions w.r.t. undefined behavior as the
/// instruction variant of this function.
bool isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB);

/// Return true if every instruction in the range (Begin, End) is
/// guaranteed to transfer execution to its static successor. \p ScanLimit
/// bounds the search to avoid scanning huge blocks.
bool isGuaranteedToTransferExecutionToSuccessor(
    BasicBlock::const_iterator Begin, BasicBlock::const_iterator End,
    unsigned ScanLimit = 32);

/// Same as previous, but with range expressed via iterator_range.
bool isGuaranteedToTransferExecutionToSuccessor(
    iterator_range<BasicBlock::const_iterator> Range, unsigned ScanLimit = 32);

/// Return true if this function can prove that the instruction I
/// is executed for every iteration of the loop L.
///
/// Note that this currently only considers the loop header.
bool isGuaranteedToExecuteForEveryIteration(const Instruction *I,
                                            const Loop *L);
0993 
/// Return true if \p PoisonOp's user yields poison or raises UB if its
/// operand \p PoisonOp is poison.
///
/// If \p PoisonOp is a vector or an aggregate and the operation's result is a
/// single value, any poison element in \p PoisonOp should make the result
/// poison or raise UB.
///
/// To filter out operands that raise UB on poison, you can use
/// getGuaranteedNonPoisonOps.
bool propagatesPoison(const Use &PoisonOp);

/// Insert operands of I into Ops such that I will trigger undefined behavior
/// if I is executed and that operand has a poison value.
void getGuaranteedNonPoisonOps(const Instruction *I,
                               SmallVectorImpl<const Value *> &Ops);

/// Insert operands of I into Ops such that I will trigger undefined behavior
/// if I is executed and that operand is not a well-defined value
/// (i.e. has undef bits or poison).
void getGuaranteedWellDefinedOps(const Instruction *I,
                                 SmallVectorImpl<const Value *> &Ops);

/// Return true if the given instruction must trigger undefined behavior
/// when I is executed with any operands which appear in KnownPoison holding
/// a poison value at the point of execution.
bool mustTriggerUB(const Instruction *I,
                   const SmallPtrSetImpl<const Value *> &KnownPoison);

/// Return true if this function can prove that if Inst is executed
/// and yields a poison value or undef bits, then that will trigger
/// undefined behavior.
///
/// Note that this currently only considers the basic block that is
/// the parent of Inst.
bool programUndefinedIfUndefOrPoison(const Instruction *Inst);
bool programUndefinedIfPoison(const Instruction *Inst);

/// canCreateUndefOrPoison returns true if Op can create undef or poison from
/// non-undef & non-poison operands.
/// For vectors, canCreateUndefOrPoison returns true if there is potential
/// poison or undef in any element of the result when vectors without
/// undef/poison are given as operands.
/// For example, given `Op = shl <2 x i32> %x, <0, 32>`, this function returns
/// true. If Op raises immediate UB but never creates poison or undef
/// (e.g. sdiv I, 0), canCreatePoison returns false.
///
/// \p ConsiderFlagsAndMetadata controls whether poison producing flags and
/// metadata on the instruction are considered.  This can be used to see if the
/// instruction could still introduce undef or poison even without poison
/// generating flags and metadata which might be on the instruction.
/// (i.e. could the result of Op->dropPoisonGeneratingFlags() still create
/// poison or undef)
///
/// canCreatePoison returns true if Op can create poison from non-poison
/// operands.
bool canCreateUndefOrPoison(const Operator *Op,
                            bool ConsiderFlagsAndMetadata = true);
bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata = true);

/// Return true if V is poison given that ValAssumedPoison is already poison.
/// For example, if ValAssumedPoison is `icmp X, 10` and V is `icmp X, 5`,
/// impliesPoison returns true.
bool impliesPoison(const Value *ValAssumedPoison, const Value *V);

/// Return true if this function can prove that V does not have undef bits
/// and is never poison. If V is an aggregate value or vector, check whether
/// all elements (except padding) are not undef or poison.
/// Note that this is different from canCreateUndefOrPoison because the
/// function assumes Op's operands are not poison/undef.
///
/// If CtxI and DT are specified this method performs flow-sensitive analysis
/// and returns true if it is guaranteed to be never undef or poison
/// immediately before the CtxI.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
                                      AssumptionCache *AC = nullptr,
                                      const Instruction *CtxI = nullptr,
                                      const DominatorTree *DT = nullptr,
                                      unsigned Depth = 0);

/// Returns true if V cannot be poison, but may be undef.
bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC = nullptr,
                               const Instruction *CtxI = nullptr,
                               const DominatorTree *DT = nullptr,
                               unsigned Depth = 0);
1078 
1079 inline bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC,
1080                                       BasicBlock::iterator CtxI,
1081                                       const DominatorTree *DT = nullptr,
1082                                       unsigned Depth = 0) {
1083   // Takes an iterator as a position, passes down to Instruction *
1084   // implementation.
1085   return isGuaranteedNotToBePoison(V, AC, &*CtxI, DT, Depth);
1086 }
1087 
/// Returns true if V cannot be undef, but may be poison.
bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC = nullptr,
                              const Instruction *CtxI = nullptr,
                              const DominatorTree *DT = nullptr,
                              unsigned Depth = 0);

/// Return true if undefined behavior would provably be executed on the path to
/// OnPathTo if Root produced a poison result.  Note that this doesn't say
/// anything about whether OnPathTo is actually executed or whether Root is
/// actually poison.  This can be used to assess whether a new use of Root can
/// be added at a location which is control equivalent with OnPathTo (such as
/// immediately before it) without introducing UB which didn't previously
/// exist.  Note that a false result conveys no information.
bool mustExecuteUBIfPoisonOnPathTo(Instruction *Root,
                                   Instruction *OnPathTo,
                                   DominatorTree *DT);

/// Convert an integer comparison with a constant RHS into an equivalent
/// form with the strictness flipped predicate. Return the new predicate and
/// corresponding constant RHS if possible. Otherwise return std::nullopt.
/// E.g., (icmp sgt X, 0) -> (icmp sle X, 1).
std::optional<std::pair<CmpPredicate, Constant *>>
getFlippedStrictnessPredicateAndConstant(CmpPredicate Pred, Constant *C);
1111 
/// Specific patterns of select instructions we can match.
enum SelectPatternFlavor {
  SPF_UNKNOWN = 0,
  SPF_SMIN,    ///< Signed minimum
  SPF_UMIN,    ///< Unsigned minimum
  SPF_SMAX,    ///< Signed maximum
  SPF_UMAX,    ///< Unsigned maximum
  SPF_FMINNUM, ///< Floating point minnum
  SPF_FMAXNUM, ///< Floating point maxnum
  SPF_ABS,     ///< Absolute value
  SPF_NABS     ///< Negated absolute value
};

/// Behavior when a floating point min/max is given one NaN and one
/// non-NaN as input.
enum SelectPatternNaNBehavior {
  SPNB_NA = 0,        ///< NaN behavior not applicable.
  SPNB_RETURNS_NAN,   ///< Given one NaN input, returns the NaN.
  SPNB_RETURNS_OTHER, ///< Given one NaN input, returns the non-NaN.
  SPNB_RETURNS_ANY    ///< Given one NaN input, can return either (or it has
                      ///< been determined that no operands can be NaN).
};

/// Describes a matched select pattern: which flavor it is, its NaN behavior,
/// and whether the underlying fcmp has to be ordered.
struct SelectPatternResult {
  SelectPatternFlavor Flavor;
  SelectPatternNaNBehavior NaNBehavior; ///< Only applicable if Flavor is
                                        ///< SPF_FMINNUM or SPF_FMAXNUM.
  bool Ordered; ///< When implementing this min/max pattern as fcmp; select,
                ///< does the fcmp have to be ordered?

  /// Return true if \p SPF is a min or a max pattern.
  static bool isMinOrMax(SelectPatternFlavor SPF) {
    switch (SPF) {
    case SPF_UNKNOWN:
    case SPF_ABS:
    case SPF_NABS:
      return false;
    default:
      return true;
    }
  }
};
1149 
/// Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind
/// and providing the out parameter results if we successfully match.
///
/// For ABS/NABS, LHS will be set to the input to the abs idiom. RHS will be
/// the negation instruction from the idiom.
///
/// If CastOp is not nullptr, also match MIN/MAX idioms where the type does
/// not match that of the original select. If this is the case, the cast
/// operation (one of Trunc,SExt,Zext) that must be done to transform the
/// type of LHS and RHS into the type of V is returned in CastOp.
///
/// For example:
///   %1 = icmp slt i32 %a, 4
///   %2 = sext i32 %a to i64
///   %3 = select i1 %1, i64 %2, i64 4
///
/// -> LHS = %a, RHS = i32 4, *CastOp = Instruction::SExt
///
SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
                                       Instruction::CastOps *CastOp = nullptr,
                                       unsigned Depth = 0);
1171 
1172 inline SelectPatternResult matchSelectPattern(const Value *V, const Value *&LHS,
1173                                               const Value *&RHS) {
1174   Value *L = const_cast<Value *>(LHS);
1175   Value *R = const_cast<Value *>(RHS);
1176   auto Result = matchSelectPattern(const_cast<Value *>(V), L, R);
1177   LHS = L;
1178   RHS = R;
1179   return Result;
1180 }
1181 
/// Determine the pattern that a select with the given compare as its
/// predicate and given values as its true/false operands would match.
SelectPatternResult matchDecomposedSelectPattern(
    CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
    Instruction::CastOps *CastOp = nullptr, unsigned Depth = 0);

/// Determine the pattern for predicate `X Pred Y ? X : Y`.
SelectPatternResult
getSelectPattern(CmpInst::Predicate Pred,
                 SelectPatternNaNBehavior NaNBehavior = SPNB_NA,
                 bool Ordered = false);

/// Return the canonical comparison predicate for the specified
/// minimum/maximum flavor.
CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF, bool Ordered = false);

/// Convert given `SPF` to equivalent min/max intrinsic.
/// Caller must ensure `SPF` is an integer min or max pattern.
Intrinsic::ID getMinMaxIntrinsic(SelectPatternFlavor SPF);

/// Return the inverse minimum/maximum flavor of the specified flavor.
/// For example, signed minimum is the inverse of signed maximum.
SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF);

/// Return the inverse minimum/maximum intrinsic of the specified min/max
/// intrinsic.
Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID);

/// Return the minimum or maximum constant value for the specified integer
/// min/max flavor and type.
APInt getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth);

/// Check if the values in \p VL are select instructions that can be converted
/// to a min or max (vector) intrinsic. Returns the intrinsic ID, if such a
/// conversion is possible, together with a bool indicating whether all select
/// conditions are only used by the selects. Otherwise return
/// Intrinsic::not_intrinsic.
std::pair<Intrinsic::ID, bool>
canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL);

/// Attempt to match a simple first order recurrence cycle of the form:
///   %iv = phi Ty [%Start, %Entry], [%Inc, %backedge]
///   %inc = binop %iv, %step
/// OR
///   %iv = phi Ty [%Start, %Entry], [%Inc, %backedge]
///   %inc = binop %step, %iv
///
/// A first order recurrence is a formula with the form: X_n = f(X_(n-1))
///
/// A couple of notes on subtleties in that definition:
/// * The Step does not have to be loop invariant.  In math terms, it can
///   be a free variable.  We allow recurrences with both constant and
///   variable coefficients. Callers may wish to filter cases where Step
///   does not dominate P.
/// * For non-commutative operators, we will match both forms.  This
///   results in some odd recurrence structures.  Callers may wish to filter
///   out recurrences where the phi is not the LHS of the returned operator.
/// * Because of the structure matched, the caller can assume as a post
///   condition of the match the presence of a Loop with P's parent as its
///   header *except* in unreachable code.  (Dominance decays in unreachable
///   code.)
///
/// NOTE: This is intentionally simple.  If you want the ability to analyze
/// non-trivial loop conditions, see ScalarEvolution instead.
bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start,
                           Value *&Step);

/// Analogous to the above, but starting from the binary operator.
bool matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P, Value *&Start,
                           Value *&Step);
1250 
/// Return true if RHS is known to be implied true by LHS.  Return false if
/// RHS is known to be implied false by LHS.  Otherwise, return std::nullopt if
/// no implication can be made. A & B must be i1 (boolean) values or a vector
/// of such values. Note that the truth table for implication is the same as
/// <=u on i1 values (but not <=s!).  The truth table for both is:
///    | T | F (B)
///  T | T | F
///  F | T | T
/// (A)
std::optional<bool> isImpliedCondition(const Value *LHS, const Value *RHS,
                                       const DataLayout &DL,
                                       bool LHSIsTrue = true,
                                       unsigned Depth = 0);
std::optional<bool> isImpliedCondition(const Value *LHS, CmpPredicate RHSPred,
                                       const Value *RHSOp0, const Value *RHSOp1,
                                       const DataLayout &DL,
                                       bool LHSIsTrue = true,
                                       unsigned Depth = 0);

/// Return the boolean condition value in the context of the given instruction
/// if it is known based on dominating conditions.
std::optional<bool> isImpliedByDomCondition(const Value *Cond,
                                            const Instruction *ContextI,
                                            const DataLayout &DL);
std::optional<bool> isImpliedByDomCondition(CmpPredicate Pred, const Value *LHS,
                                            const Value *RHS,
                                            const Instruction *ContextI,
                                            const DataLayout &DL);

/// Call \p InsertAffected on all Values whose known bits / value may be
/// affected by the condition \p Cond. Used by AssumptionCache and
/// DomConditionCache.
void findValuesAffectedByCondition(Value *Cond, bool IsAssume,
                                   function_ref<void(Value *)> InsertAffected);
1286 
1287 } // end namespace llvm
1288 
1289 #endif // LLVM_ANALYSIS_VALUETRACKING_H