Back to home page

EIC code displayed by LXR

 
 

    


File indexing completed on 2026-05-10 08:44:45

0001 //===- llvm/Transforms/Vectorize/LoopVectorizationLegality.h ----*- C++ -*-===//
0002 //
0003 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
0004 // See https://llvm.org/LICENSE.txt for license information.
0005 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
0006 //
0007 //===----------------------------------------------------------------------===//
0008 //
0009 /// \file
0010 /// This file defines the LoopVectorizationLegality class. Original code
0011 /// in Loop Vectorizer has been moved out to its own file for modularity
0012 /// and reusability.
0013 ///
0014 /// Currently, it works for innermost loop vectorization. Extending this to
0015 /// outer loop vectorization is a TODO item.
0016 ///
0017 /// Also provides:
0018 /// 1) LoopVectorizeHints class which keeps a number of loop annotations
0019 /// locally for easy look up. It has the ability to write them back as
0020 /// loop metadata, upon request.
0021 /// 2) LoopVectorizationRequirements class for lazy bail out for the purpose
0022 /// of reporting useful failure to vectorize message.
0023 //
0024 //===----------------------------------------------------------------------===//
0025 
0026 #ifndef LLVM_TRANSFORMS_VECTORIZE_LOOPVECTORIZATIONLEGALITY_H
0027 #define LLVM_TRANSFORMS_VECTORIZE_LOOPVECTORIZATIONLEGALITY_H
0028 
0029 #include "llvm/ADT/MapVector.h"
0030 #include "llvm/Analysis/LoopAccessAnalysis.h"
0031 #include "llvm/Support/TypeSize.h"
0032 #include "llvm/Transforms/Utils/LoopUtils.h"
0033 
0034 namespace llvm {
0035 class AssumptionCache;
0036 class BasicBlock;
0037 class BlockFrequencyInfo;
0038 class DemandedBits;
0039 class DominatorTree;
0040 class Function;
0041 class Loop;
0042 class LoopInfo;
0043 class Metadata;
0044 class OptimizationRemarkEmitter;
0045 class PredicatedScalarEvolution;
0046 class ProfileSummaryInfo;
0047 class TargetLibraryInfo;
0048 class TargetTransformInfo;
0049 class Type;
0050 
0051 /// Utility class for getting and setting loop vectorizer hints in the form
0052 /// of loop metadata.
0053 /// This class keeps a number of loop annotations locally (as member variables)
0054 /// and can, upon request, write them back as metadata on the loop. It will
0055 /// initially scan the loop for existing metadata, and will update the local
0056 /// values based on information in the loop.
0057 /// We cannot write all values to metadata, as the mere presence of some info,
0058 /// for example 'force', means a decision has been made. So, we need to be
0059 /// careful NOT to add them if the user hasn't specifically asked so.
class LoopVectorizeHints {
  /// Identifies which loop-metadata hint a Hint instance corresponds to.
  enum HintKind {
    HK_WIDTH,
    HK_INTERLEAVE,
    HK_FORCE,
    HK_ISVECTORIZED,
    HK_PREDICATE,
    HK_SCALABLE
  };

  /// Hint - associates name and validation with the hint value.
  struct Hint {
    const char *Name;
    unsigned Value; // This may have to change for non-numeric values.
    HintKind Kind;

    Hint(const char *Name, unsigned Value, HintKind Kind)
        : Name(Name), Value(Value), Kind(Kind) {}

    /// Returns true if \p Val is a legal value for this hint's kind.
    bool validate(unsigned Val);
  };

  /// Vectorization width.
  Hint Width;

  /// Vectorization interleave factor.
  Hint Interleave;

  /// Vectorization forced
  Hint Force;

  /// Already Vectorized
  Hint IsVectorized;

  /// Vector Predicate
  Hint Predicate;

  /// Says whether we should use fixed width or scalable vectorization.
  Hint Scalable;

  /// Return the loop metadata prefix.
  static StringRef Prefix() { return "llvm.loop."; }

  /// True if there is any unsafe math in the loop.
  bool PotentiallyUnsafe = false;

public:
  /// Tri-state for the vectorize/interleave force hints.
  enum ForceKind {
    FK_Undefined = -1, ///< Not selected.
    FK_Disabled = 0,   ///< Forcing disabled.
    FK_Enabled = 1,    ///< Forcing enabled.
  };

  enum ScalableForceKind {
    /// Not selected.
    SK_Unspecified = -1,
    /// Disables vectorization with scalable vectors.
    SK_FixedWidthOnly = 0,
    /// Vectorize loops using scalable vectors or fixed-width vectors, but favor
    /// scalable vectors when the cost-model is inconclusive. This is the
    /// default when the scalable.enable hint is enabled through a pragma.
    SK_PreferScalable = 1
  };

  /// Scans the metadata of loop \p L and initializes the local hint values.
  LoopVectorizeHints(const Loop *L, bool InterleaveOnlyWhenForced,
                     OptimizationRemarkEmitter &ORE,
                     const TargetTransformInfo *TTI = nullptr);

  /// Mark the loop L as already vectorized by setting the width to 1.
  void setAlreadyVectorized();

  /// Returns true if vectorization of \p L should be attempted, taking the
  /// loop hints and \p VectorizeOnlyWhenForced into account.
  bool allowVectorization(Function *F, Loop *L,
                          bool VectorizeOnlyWhenForced) const;

  /// Dumps all the hint information.
  void emitRemarkWithHints() const;

  /// Returns the hinted vectorization width; the result is scalable iff the
  /// scalable hint prefers scalable vectors.
  ElementCount getWidth() const {
    return ElementCount::get(Width.Value, (ScalableForceKind)Scalable.Value ==
                                              SK_PreferScalable);
  }

  /// Returns the explicit interleave-count hint if set; otherwise 1 if loop
  /// unrolling is disabled (no interleaving either), or 0 for "unspecified".
  unsigned getInterleave() const {
    if (Interleave.Value)
      return Interleave.Value;
    // If interleaving is not explicitly set, assume that if we do not want
    // unrolling, we also don't want any interleaving.
    if (llvm::hasUnrollTransformation(TheLoop) & TM_Disable)
      return 1;
    return 0;
  }
  /// Returns the raw value of the "isvectorized" hint.
  unsigned getIsVectorized() const { return IsVectorized.Value; }
  /// Returns the raw value of the predicate hint.
  unsigned getPredicate() const { return Predicate.Value; }
  /// Returns the force hint; an undefined force hint degrades to FK_Disabled
  /// when all transformations are disabled on the loop.
  enum ForceKind getForce() const {
    if ((ForceKind)Force.Value == FK_Undefined &&
        hasDisableAllTransformsHint(TheLoop))
      return FK_Disabled;
    return (ForceKind)Force.Value;
  }

  /// \return true if scalable vectorization has been explicitly disabled.
  bool isScalableVectorizationDisabled() const {
    return (ScalableForceKind)Scalable.Value == SK_FixedWidthOnly;
  }

  /// If hints are provided that force vectorization, use the AlwaysPrint
  /// pass name to force the frontend to print the diagnostic.
  const char *vectorizeAnalysisPassName() const;

  /// When enabling loop hints are provided we allow the vectorizer to change
  /// the order of operations that is given by the scalar loop. This is not
  /// enabled by default because can be unsafe or inefficient. For example,
  /// reordering floating-point operations will change the way round-off
  /// error accumulates in the loop.
  bool allowReordering() const;

  /// Returns true if unsafe math was recorded and vectorization is not forced.
  bool isPotentiallyUnsafe() const {
    // Avoid FP vectorization if the target is unsure about proper support.
    // This may be related to the SIMD unit in the target not handling
    // IEEE 754 FP ops properly, or bad single-to-double promotions.
    // Otherwise, a sequence of vectorized loops, even without reduction,
    // could lead to different end results on the destination vectors.
    return getForce() != LoopVectorizeHints::FK_Enabled && PotentiallyUnsafe;
  }

  /// Records that the loop contains potentially unsafe math.
  void setPotentiallyUnsafe() { PotentiallyUnsafe = true; }

private:
  /// Find hints specified in the loop metadata and update local values.
  void getHintsFromMetadata();

  /// Checks string hint with one operand and set value if valid.
  void setHint(StringRef Name, Metadata *Arg);

  /// The loop these hints belong to.
  const Loop *TheLoop;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter &ORE;
};
0200 
0201 /// This holds vectorization requirements that must be verified late in
0202 /// the process. The requirements are set by legalize and costmodel. Once
0203 /// vectorization has been determined to be possible and profitable the
0204 /// requirements can be verified by looking for metadata or compiler options.
0205 /// For example, some loops require FP commutativity which is only allowed if
0206 /// vectorization is explicitly specified or if the fast-math compiler option
0207 /// has been provided.
0208 /// Late evaluation of these requirements allows helpful diagnostics to be
0209 /// composed that tells the user what need to be done to vectorize the loop. For
0210 /// example, by specifying #pragma clang loop vectorize or -ffast-math. Late
0211 /// evaluation should be used only when diagnostics can generated that can be
0212 /// followed by a non-expert user.
0213 class LoopVectorizationRequirements {
0214 public:
0215   /// Track the 1st floating-point instruction that can not be reassociated.
0216   void addExactFPMathInst(Instruction *I) {
0217     if (I && !ExactFPMathInst)
0218       ExactFPMathInst = I;
0219   }
0220 
0221   Instruction *getExactFPInst() { return ExactFPMathInst; }
0222 
0223 private:
0224   Instruction *ExactFPMathInst = nullptr;
0225 };
0226 
0227 /// This holds details about a histogram operation -- a load -> update -> store
0228 /// sequence where each lane in a vector might be updating the same element as
0229 /// another lane.
0230 struct HistogramInfo {
0231   LoadInst *Load;
0232   Instruction *Update;
0233   StoreInst *Store;
0234 
0235   HistogramInfo(LoadInst *Load, Instruction *Update, StoreInst *Store)
0236       : Load(Load), Update(Update), Store(Store) {}
0237 };
0238 
0239 /// LoopVectorizationLegality checks if it is legal to vectorize a loop, and
0240 /// to what vectorization factor.
0241 /// This class does not look at the profitability of vectorization, only the
0242 /// legality. This class has two main kinds of checks:
0243 /// * Memory checks - The code in canVectorizeMemory checks if vectorization
0244 ///   will change the order of memory accesses in a way that will change the
0245 ///   correctness of the program.
0246 /// * Scalars checks - The code in canVectorizeInstrs and canVectorizeMemory
0247 /// checks for a number of different conditions, such as the availability of a
0248 /// single induction variable, that all types are supported and vectorize-able,
0249 /// etc. This code reflects the capabilities of InnerLoopVectorizer.
0250 /// This class is also used by InnerLoopVectorizer for identifying
0251 /// induction variable and the different reduction variables.
class LoopVectorizationLegality {
public:
  /// Construct the legality analysis for loop \p L. All analysis objects are
  /// borrowed (not owned) and must outlive this instance.
  /// NOTE(review): the Function parameter \p F is not stored by this
  /// constructor — confirm whether it is still needed in the signature.
  LoopVectorizationLegality(
      Loop *L, PredicatedScalarEvolution &PSE, DominatorTree *DT,
      TargetTransformInfo *TTI, TargetLibraryInfo *TLI, Function *F,
      LoopAccessInfoManager &LAIs, LoopInfo *LI, OptimizationRemarkEmitter *ORE,
      LoopVectorizationRequirements *R, LoopVectorizeHints *H, DemandedBits *DB,
      AssumptionCache *AC, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI)
      : TheLoop(L), LI(LI), PSE(PSE), TTI(TTI), TLI(TLI), DT(DT), LAIs(LAIs),
        ORE(ORE), Requirements(R), Hints(H), DB(DB), AC(AC), BFI(BFI),
        PSI(PSI) {}

  /// ReductionList contains the reduction descriptors for all
  /// of the reductions that were found in the loop.
  using ReductionList = MapVector<PHINode *, RecurrenceDescriptor>;

  /// InductionList saves induction variables and maps them to the
  /// induction descriptor.
  using InductionList = MapVector<PHINode *, InductionDescriptor>;

  /// RecurrenceSet contains the phi nodes that are recurrences other than
  /// inductions and reductions.
  using RecurrenceSet = SmallPtrSet<const PHINode *, 8>;

  /// Returns true if it is legal to vectorize this loop.
  /// This does not mean that it is profitable to vectorize this
  /// loop, only that it is legal to do so.
  /// Temporarily taking UseVPlanNativePath parameter. If true, take
  /// the new code path being implemented for outer loop vectorization
  /// (should be functional for inner loop vectorization) based on VPlan.
  /// If false, good old LV code.
  bool canVectorize(bool UseVPlanNativePath);

  /// Returns true if it is legal to vectorize the FP math operations in this
  /// loop. Vectorizing is legal if we allow reordering of FP operations, or if
  /// we can use in-order reductions.
  bool canVectorizeFPMath(bool EnableStrictReductions);

  /// Return true if we can vectorize this loop while folding its tail by
  /// masking.
  bool canFoldTailByMasking() const;

  /// Mark all respective loads/stores for masking. Must only be called when
  /// tail-folding is possible.
  void prepareToFoldTailByMasking();

  /// Returns the primary induction variable.
  PHINode *getPrimaryInduction() { return PrimaryInduction; }

  /// Returns the reduction variables found in the loop.
  const ReductionList &getReductionVars() const { return Reductions; }

  /// Returns the induction variables found in the loop.
  const InductionList &getInductionVars() const { return Inductions; }

  /// Return the fixed-order recurrences found in the loop.
  RecurrenceSet &getFixedOrderRecurrences() { return FixedOrderRecurrences; }

  /// Returns the widest induction type.
  Type *getWidestInductionType() { return WidestIndTy; }

  /// Returns True if given store is a final invariant store of one of the
  /// reductions found in the loop.
  bool isInvariantStoreOfReduction(StoreInst *SI);

  /// Returns True if given address is invariant and is used to store recurrent
  /// expression
  bool isInvariantAddressOfReduction(Value *V);

  /// Returns True if V is a Phi node of an induction variable in this loop.
  bool isInductionPhi(const Value *V) const;

  /// Returns a pointer to the induction descriptor, if \p Phi is an integer or
  /// floating point induction.
  const InductionDescriptor *getIntOrFpInductionDescriptor(PHINode *Phi) const;

  /// Returns a pointer to the induction descriptor, if \p Phi is pointer
  /// induction.
  const InductionDescriptor *getPointerInductionDescriptor(PHINode *Phi) const;

  /// Returns True if V is a cast that is part of an induction def-use chain,
  /// and had been proven to be redundant under a runtime guard (in other
  /// words, the cast has the same SCEV expression as the induction phi).
  bool isCastedInductionVariable(const Value *V) const;

  /// Returns True if V can be considered as an induction variable in this
  /// loop. V can be the induction phi, or some redundant cast in the def-use
  /// chain of the inducion phi.
  bool isInductionVariable(const Value *V) const;

  /// Returns True if PN is a reduction variable in this loop.
  bool isReductionVariable(PHINode *PN) const { return Reductions.count(PN); }

  /// Returns True if Phi is a fixed-order recurrence in this loop.
  bool isFixedOrderRecurrence(const PHINode *Phi) const;

  /// Return true if the block BB needs to be predicated in order for the loop
  /// to be vectorized.
  bool blockNeedsPredication(BasicBlock *BB) const;

  /// Check if this pointer is consecutive when vectorizing. This happens
  /// when the last index of the GEP is the induction variable, or that the
  /// pointer itself is an induction variable.
  /// This check allows us to vectorize A[idx] into a wide load/store.
  /// Returns:
  /// 0 - Stride is unknown or non-consecutive.
  /// 1 - Address is consecutive.
  /// -1 - Address is consecutive, and decreasing.
  /// NOTE: This method must only be used before modifying the original scalar
  /// loop. Do not use after invoking 'createVectorizedLoopSkeleton' (PR34965).
  int isConsecutivePtr(Type *AccessTy, Value *Ptr) const;

  /// Returns true if \p V is invariant across all loop iterations according to
  /// SCEV.
  bool isInvariant(Value *V) const;

  /// Returns true if value V is uniform across \p VF lanes, when \p VF is
  /// provided, and otherwise if \p V is invariant across all loop iterations.
  bool isUniform(Value *V, ElementCount VF) const;

  /// A uniform memory op is a load or store which accesses the same memory
  /// location on all \p VF lanes, if \p VF is provided and otherwise if the
  /// memory location is invariant.
  bool isUniformMemOp(Instruction &I, ElementCount VF) const;

  /// Returns the information that we collected about runtime memory check.
  const RuntimePointerChecking *getRuntimePointerChecking() const {
    return LAI->getRuntimePointerChecking();
  }

  /// Returns the loop-access analysis result for the loop.
  const LoopAccessInfo *getLAI() const { return LAI; }

  /// Returns true if the dependence checker found no width-limiting
  /// dependences, i.e. any vector width is safe.
  bool isSafeForAnyVectorWidth() const {
    return LAI->getDepChecker().isSafeForAnyVectorWidth();
  }

  /// Returns the maximum vector width (in bits) that is safe with respect to
  /// the memory dependences found in the loop.
  uint64_t getMaxSafeVectorWidthInBits() const {
    return LAI->getDepChecker().getMaxSafeVectorWidthInBits();
  }

  /// Returns true if the loop has exactly one uncountable early exit, i.e. an
  /// uncountable exit that isn't the latch block.
  bool hasUncountableEarlyExit() const {
    return getUncountableEdge().has_value();
  }

  /// Returns the uncountable early exiting block, if there is exactly one.
  BasicBlock *getUncountableEarlyExitingBlock() const {
    return hasUncountableEarlyExit() ? getUncountableEdge()->first : nullptr;
  }

  /// Returns the destination of the uncountable early exiting block, if there
  /// is exactly one.
  BasicBlock *getUncountableEarlyExitBlock() const {
    return hasUncountableEarlyExit() ? getUncountableEdge()->second : nullptr;
  }

  /// Returns true if vector representation of the instruction \p I
  /// requires mask.
  bool isMaskRequired(const Instruction *I) const {
    return MaskedOp.contains(I);
  }

  /// Returns true if there is at least one function call in the loop which
  /// has a vectorized variant available.
  bool hasVectorCallVariants() const { return VecCallVariantsFound; }

  /// Returns true if there is at least one function call in the loop which
  /// returns a struct type and needs to be vectorized.
  bool hasStructVectorCall() const { return StructVecCallFound; }

  /// Returns the number of stores in the loop, as counted by loop-access
  /// analysis.
  unsigned getNumStores() const { return LAI->getNumStores(); }
  /// Returns the number of loads in the loop, as counted by loop-access
  /// analysis.
  unsigned getNumLoads() const { return LAI->getNumLoads(); }

  /// Returns a HistogramInfo* for the given instruction if it was determined
  /// to be part of a load -> update -> store sequence where multiple lanes
  /// may be working on the same memory address.
  std::optional<const HistogramInfo *> getHistogramInfo(Instruction *I) const {
    // Linear scan: the number of histograms per loop is expected to be small.
    for (const HistogramInfo &HGram : Histograms)
      if (HGram.Load == I || HGram.Update == I || HGram.Store == I)
        return &HGram;

    return std::nullopt;
  }

  /// Returns true if any histogram operations were found in the loop.
  bool hasHistograms() const { return !Histograms.empty(); }

  /// Returns the predicated scalar-evolution wrapper used by this analysis.
  PredicatedScalarEvolution *getPredicatedScalarEvolution() const {
    return &PSE;
  }

  /// Returns the loop being analyzed.
  Loop *getLoop() const { return TheLoop; }

  /// Returns the LoopInfo analysis.
  LoopInfo *getLoopInfo() const { return LI; }

  /// Returns the assumption cache.
  AssumptionCache *getAssumptionCache() const { return AC; }

  /// Returns the underlying ScalarEvolution of the predicated SCEV wrapper.
  ScalarEvolution *getScalarEvolution() const { return PSE.getSE(); }

  /// Returns the dominator tree.
  DominatorTree *getDominatorTree() const { return DT; }

  /// Returns all exiting blocks with a countable exit, i.e. the
  /// exit-not-taken count is known exactly at compile time.
  const SmallVector<BasicBlock *, 4> &getCountableExitingBlocks() const {
    return CountableExitingBlocks;
  }

  /// Returns the loop edge to an uncountable exit, or std::nullopt if there
  /// isn't a single such edge.
  std::optional<std::pair<BasicBlock *, BasicBlock *>>
  getUncountableEdge() const {
    return UncountableEdge;
  }

private:
  /// Return true if the pre-header, exiting and latch blocks of \p Lp and all
  /// its nested loops are considered legal for vectorization. These legal
  /// checks are common for inner and outer loop vectorization.
  /// Temporarily taking UseVPlanNativePath parameter. If true, take
  /// the new code path being implemented for outer loop vectorization
  /// (should be functional for inner loop vectorization) based on VPlan.
  /// If false, good old LV code.
  bool canVectorizeLoopNestCFG(Loop *Lp, bool UseVPlanNativePath);

  /// Set up outer loop inductions by checking Phis in outer loop header for
  /// supported inductions (int inductions). Return false if any of these Phis
  /// is not a supported induction or if we fail to find an induction.
  bool setupOuterLoopInductions();

  /// Return true if the pre-header, exiting and latch blocks of \p Lp
  /// (non-recursive) are considered legal for vectorization.
  /// Temporarily taking UseVPlanNativePath parameter. If true, take
  /// the new code path being implemented for outer loop vectorization
  /// (should be functional for inner loop vectorization) based on VPlan.
  /// If false, good old LV code.
  bool canVectorizeLoopCFG(Loop *Lp, bool UseVPlanNativePath);

  /// Check if a single basic block loop is vectorizable.
  /// At this point we know that this is a loop with a constant trip count
  /// and we only need to check individual instructions.
  bool canVectorizeInstrs();

  /// When we vectorize loops we may change the order in which
  /// we read and write from memory. This method checks if it is
  /// legal to vectorize the code, considering only memory constrains.
  /// Returns true if the loop is vectorizable
  bool canVectorizeMemory();

  /// If LAA cannot determine whether all dependences are safe, we may be able
  /// to further analyse some IndirectUnsafe dependences and if they match a
  /// certain pattern (like a histogram) then we may still be able to vectorize.
  bool canVectorizeIndirectUnsafeDependences();

  /// Return true if we can vectorize this loop using the IF-conversion
  /// transformation.
  bool canVectorizeWithIfConvert();

  /// Return true if we can vectorize this outer loop. The method performs
  /// specific checks for outer loop vectorization.
  bool canVectorizeOuterLoop();

  /// Returns true if this is an early exit loop that can be vectorized.
  /// Currently, a loop with an uncountable early exit is considered
  /// vectorizable if:
  ///   1. There are no writes to memory in the loop.
  ///   2. The loop has only one early uncountable exit
  ///   3. The early exit block dominates the latch block.
  ///   4. The latch block has an exact exit count.
  ///   5. The loop does not contain reductions or recurrences.
  ///   6. We can prove at compile-time that loops will not contain faulting
  ///   loads.
  ///   7. It is safe to speculatively execute instructions such as divide or
  ///   call instructions.
  /// The list above is not based on theoretical limitations of vectorization,
  /// but simply a statement that more work is needed to support these
  /// additional cases safely.
  bool isVectorizableEarlyExitLoop();

  /// Return true if all of the instructions in the block can be speculatively
  /// executed, and record the loads/stores that require masking.
  /// \p SafePtrs is a list of addresses that are known to be legal and we know
  /// that we can read from them without segfault.
  /// \p MaskedOp is a list of instructions that have to be transformed into
  /// calls to the appropriate masked intrinsic when the loop is vectorized
  /// or dropped if the instruction is a conditional assume intrinsic.
  bool
  blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs,
                       SmallPtrSetImpl<const Instruction *> &MaskedOp) const;

  /// Updates the vectorization state by adding \p Phi to the inductions list.
  /// This can set \p Phi as the main induction of the loop if \p Phi is a
  /// better choice for the main induction than the existing one.
  void addInductionPhi(PHINode *Phi, const InductionDescriptor &ID,
                       SmallPtrSetImpl<Value *> &AllowedExit);

  /// The loop that we evaluate.
  Loop *TheLoop;

  /// Loop Info analysis.
  LoopInfo *LI;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks.
  /// Applies dynamic knowledge to simplify SCEV expressions in the context
  /// of existing SCEV assumptions. The analysis will also add a minimal set
  /// of new predicates if this is required to enable vectorization and
  /// unrolling.
  PredicatedScalarEvolution &PSE;

  /// Target Transform Info.
  TargetTransformInfo *TTI;

  /// Target Library Info.
  TargetLibraryInfo *TLI;

  /// Dominator Tree.
  DominatorTree *DT;

  // LoopAccess analysis.
  LoopAccessInfoManager &LAIs;

  /// Cached loop-access result for TheLoop; null until computed.
  const LoopAccessInfo *LAI = nullptr;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  //  ---  vectorization state --- //

  /// Holds the primary induction variable. This is the counter of the
  /// loop.
  PHINode *PrimaryInduction = nullptr;

  /// Holds the reduction variables.
  ReductionList Reductions;

  /// Holds all of the induction variables that we found in the loop.
  /// Notice that inductions don't need to start at zero and that induction
  /// variables can be pointers.
  InductionList Inductions;

  /// Holds all the casts that participate in the update chain of the induction
  /// variables, and that have been proven to be redundant (possibly under a
  /// runtime guard). These casts can be ignored when creating the vectorized
  /// loop body.
  SmallPtrSet<Instruction *, 4> InductionCastsToIgnore;

  /// Holds the phi nodes that are fixed-order recurrences.
  RecurrenceSet FixedOrderRecurrences;

  /// Holds the widest induction type encountered.
  Type *WidestIndTy = nullptr;

  /// Allowed outside users. This holds the variables that can be accessed from
  /// outside the loop.
  SmallPtrSet<Value *, 4> AllowedExit;

  /// Vectorization requirements that will go through late-evaluation.
  LoopVectorizationRequirements *Requirements;

  /// Used to emit an analysis of any legality issues.
  LoopVectorizeHints *Hints;

  /// The demanded bits analysis is used to compute the minimum type size in
  /// which a reduction can be computed.
  DemandedBits *DB;

  /// The assumption cache analysis is used to compute the minimum type size in
  /// which a reduction can be computed.
  AssumptionCache *AC;

  /// While vectorizing these instructions we have to generate a
  /// call to the appropriate masked intrinsic or drop them in case of
  /// conditional assumes.
  SmallPtrSet<const Instruction *, 8> MaskedOp;

  /// Contains all identified histogram operations, which are sequences of
  /// load -> update -> store instructions where multiple lanes in a vector
  /// may work on the same memory location.
  SmallVector<HistogramInfo, 1> Histograms;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  /// If we discover function calls within the loop which have a valid
  /// vectorized variant, record that fact so that LoopVectorize can
  /// (potentially) make a better decision on the maximum VF and enable
  /// the use of those function variants.
  bool VecCallVariantsFound = false;

  /// If we find a call (to be vectorized) that returns a struct type, record
  /// that so we can bail out until this is supported.
  /// TODO: Remove this flag once vectorizing calls with struct returns is
  /// supported.
  bool StructVecCallFound = false;

  /// Keep track of all the countable and uncountable exiting blocks if
  /// the exact backedge taken count is not computable.
  SmallVector<BasicBlock *, 4> CountableExitingBlocks;

  /// Keep track of the loop edge to an uncountable exit, comprising a pair
  /// of (Exiting, Exit) blocks, if there is exactly one early exit.
  std::optional<std::pair<BasicBlock *, BasicBlock *>> UncountableEdge;
};
0656 
0657 } // namespace llvm
0658 
0659 #endif // LLVM_TRANSFORMS_VECTORIZE_LOOPVECTORIZATIONLEGALITY_H