Back to home page

EIC code displayed by LXR

 
 

    


File indexing completed on 2026-05-10 08:44:38

0001 //===- InstCombiner.h - InstCombine implementation --------------*- C++ -*-===//
0002 //
0003 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
0004 // See https://llvm.org/LICENSE.txt for license information.
0005 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
0006 //
0007 //===----------------------------------------------------------------------===//
0008 /// \file
0009 ///
0010 /// This file provides the interface for the instcombine pass implementation.
0011 /// The interface is used for generic transformations in this folder and
0012 /// target specific combinations in the targets.
0013 /// The visitor implementation is in \c InstCombinerImpl in
0014 /// \c InstCombineInternal.h.
0015 ///
0016 //===----------------------------------------------------------------------===//
0017 
0018 #ifndef LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINER_H
0019 #define LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINER_H
0020 
0021 #include "llvm/ADT/PostOrderIterator.h"
0022 #include "llvm/Analysis/DomConditionCache.h"
0023 #include "llvm/Analysis/InstructionSimplify.h"
0024 #include "llvm/Analysis/TargetFolder.h"
0025 #include "llvm/Analysis/ValueTracking.h"
0026 #include "llvm/IR/IRBuilder.h"
0027 #include "llvm/IR/PatternMatch.h"
0028 #include "llvm/Support/Debug.h"
0029 #include "llvm/Support/KnownBits.h"
0030 #include <cassert>
0031 
0032 #define DEBUG_TYPE "instcombine"
0033 #include "llvm/Transforms/Utils/InstructionWorklist.h"
0034 
0035 namespace llvm {
0036 
0037 class AAResults;
0038 class AssumptionCache;
0039 class OptimizationRemarkEmitter;
0040 class ProfileSummaryInfo;
0041 class TargetLibraryInfo;
0042 class TargetTransformInfo;
0043 
0044 /// The core instruction combiner logic.
0045 ///
0046 /// This class provides both the logic to recursively visit instructions and
0047 /// combine them.
0048 class LLVM_LIBRARY_VISIBILITY InstCombiner {
0049   /// Only used to call target specific intrinsic combining.
0050   /// It must **NOT** be used for any other purpose, as InstCombine is a
0051   /// target-independent canonicalization transform.
0052   TargetTransformInfo &TTIForTargetIntrinsicsOnly;
0053 
0054 public:
0055   /// Maximum size of array considered when transforming.
0056   uint64_t MaxArraySizeForCombine = 0;
0057 
0058   /// An IRBuilder that automatically inserts new instructions into the
0059   /// worklist.
0060   using BuilderTy = IRBuilder<TargetFolder, IRBuilderCallbackInserter>;
0061   BuilderTy &Builder;
0062 
0063 protected:
0064   /// A worklist of the instructions that need to be simplified.
0065   InstructionWorklist &Worklist;
0066 
0067   // Mode in which we are running the combiner.
0068   const bool MinimizeSize;
0069 
  // Alias analysis results. Held by pointer (the constructor takes a
  // pointer), so it may be null — NOTE(review): confirm nullability
  // expectations against the callers/uses of AA.
0070   AAResults *AA;
0071 
0072   // Required analyses.
0073   AssumptionCache &AC;
0074   TargetLibraryInfo &TLI;
0075   DominatorTree &DT;
0076   const DataLayout &DL;
0077   SimplifyQuery SQ;
0078   OptimizationRemarkEmitter &ORE;
0079   BlockFrequencyInfo *BFI;
0080   BranchProbabilityInfo *BPI;
0081   ProfileSummaryInfo *PSI;
0082   DomConditionCache DC;
0083 
  /// Reverse post-order traversal of the function's blocks, owned by the
  /// caller and passed in at construction time.
0084   ReversePostOrderTraversal<BasicBlock *> &RPOT;
0085 
  /// True once this combiner run has changed the IR. Maintained by the
  /// implementation (the updates are not visible in this header).
0086   bool MadeIRChange = false;
0087 
0088   /// Edges that are known to never be taken.
0089   SmallDenseSet<std::pair<BasicBlock *, BasicBlock *>, 8> DeadEdges;
0090 
0091   /// Order of predecessors to canonicalize phi nodes towards.
0092   SmallDenseMap<BasicBlock *, SmallVector<BasicBlock *>, 8> PredOrder;
0093 
0094   /// Backedges, used to avoid pushing instructions across backedges in cases
0095   /// where this may result in infinite combine loops. For irreducible loops
0096   /// this picks an arbitrary backedge.
0097   SmallDenseSet<std::pair<const BasicBlock *, const BasicBlock *>, 8> BackEdges;
  /// Whether BackEdges has been populated; checked and lazily triggered by
  /// isBackEdge().
0098   bool ComputedBackEdges = false;
0099 
0100 public:
0101   InstCombiner(InstructionWorklist &Worklist, BuilderTy &Builder,
0102                bool MinimizeSize, AAResults *AA, AssumptionCache &AC,
0103                TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
0104                DominatorTree &DT, OptimizationRemarkEmitter &ORE,
0105                BlockFrequencyInfo *BFI, BranchProbabilityInfo *BPI,
0106                ProfileSummaryInfo *PSI, const DataLayout &DL,
0107                ReversePostOrderTraversal<BasicBlock *> &RPOT)
0108       : TTIForTargetIntrinsicsOnly(TTI), Builder(Builder), Worklist(Worklist),
0109         MinimizeSize(MinimizeSize), AA(AA), AC(AC), TLI(TLI), DT(DT), DL(DL),
0110         SQ(DL, &TLI, &DT, &AC, nullptr, /*UseInstrInfo*/ true,
0111            /*CanUseUndef*/ true, &DC),
0112         ORE(ORE), BFI(BFI), BPI(BPI), PSI(PSI), RPOT(RPOT) {}
0113 
0114   virtual ~InstCombiner() = default;
0115 
0116   /// Return the source operand of a potentially bitcasted value while
0117   /// optionally checking if it has one use. If there is no bitcast or the one
0118   /// use check is not met, return the input value itself.
0119   static Value *peekThroughBitcast(Value *V, bool OneUseOnly = false) {
0120     if (auto *BitCast = dyn_cast<BitCastInst>(V))
0121       if (!OneUseOnly || BitCast->hasOneUse())
0122         return BitCast->getOperand(0);
0123 
0124     // V is not a bitcast or V has more than one use and OneUseOnly is true.
0125     return V;
0126   }
0127 
0128   /// Assign a complexity or rank value to LLVM Values. This is used to reduce
0129   /// the amount of pattern matching needed for compares and commutative
0130   /// instructions. For example, if we have:
0131   ///   icmp ugt X, Constant
0132   /// or
0133   ///   xor (add X, Constant), cast Z
0134   ///
0135   /// We do not have to consider the commuted variants of these patterns because
0136   /// canonicalization based on complexity guarantees the above ordering.
0137   ///
0138   /// This routine maps IR values to various complexity ranks:
0139   ///   0 -> undef
0140   ///   1 -> Constants
0141   ///   2 -> Cast and (f)neg/not instructions
0142   ///   3 -> Other instructions and arguments
0143   static unsigned getComplexity(Value *V) {
0144     if (isa<Constant>(V))
0145       return isa<UndefValue>(V) ? 0 : 1;
0146 
0147     if (isa<CastInst>(V) || match(V, m_Neg(PatternMatch::m_Value())) ||
0148         match(V, m_Not(PatternMatch::m_Value())) ||
0149         match(V, m_FNeg(PatternMatch::m_Value())))
0150       return 2;
0151 
0152     return 3;
0153   }
0154 
0155   /// Predicate canonicalization reduces the number of patterns that need to be
0156   /// matched by other transforms. For example, we may swap the operands of a
0157   /// conditional branch or select to create a compare with a canonical
0158   /// (inverted) predicate which is then more likely to be matched with other
0159   /// values.
  /// Returns false exactly for the predicates listed below (NE/ULE/SLE/UGE/SGE
  /// and FCMP ONE/OLE/OGE); everything else is considered canonical.
0160   static bool isCanonicalPredicate(CmpPredicate Pred) {
0161     switch (Pred) {
0162     case CmpInst::ICMP_NE:
0163     case CmpInst::ICMP_ULE:
0164     case CmpInst::ICMP_SLE:
0165     case CmpInst::ICMP_UGE:
0166     case CmpInst::ICMP_SGE:
0167     // TODO: There are 16 FCMP predicates. Should others be (not) canonical?
0168     case CmpInst::FCMP_ONE:
0169     case CmpInst::FCMP_OLE:
0170     case CmpInst::FCMP_OGE:
0171       return false;
0172     default:
0173       return true;
0174     }
0175   }
0176 
0177   /// Add one to a Constant
0178   static Constant *AddOne(Constant *C) {
0179     return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
0180   }
0181 
0182   /// Subtract one from a Constant
0183   static Constant *SubOne(Constant *C) {
0184     return ConstantExpr::getSub(C, ConstantInt::get(C->getType(), 1));
0185   }
0186 
  /// Return true if \p SI matches the canonical logical and/or select form, in
  /// which case a 'not' should not be folded into it by swapping operands.
0187   static bool shouldAvoidAbsorbingNotIntoSelect(const SelectInst &SI) {
0188     // a ? b : false and a ? true : b are the canonical form of logical and/or.
0189     // This includes !a ? b : false and !a ? true : b. Absorbing the not into
0190     // the select by swapping operands would break recognition of this pattern
0191     // in other analyses, so don't do that.
0192     return match(&SI, PatternMatch::m_LogicalAnd(PatternMatch::m_Value(),
0193                                                  PatternMatch::m_Value())) ||
0194            match(&SI, PatternMatch::m_LogicalOr(PatternMatch::m_Value(),
0195                                                 PatternMatch::m_Value()));
0196   }
0197 
0198   /// Return nonnull value if V is free to invert under the condition of
0199   /// WillInvertAllUses.
0200   /// If Builder is nonnull, it will return a simplified ~V.
0201   /// If Builder is null, it will return an arbitrary nonnull value (not
0202   /// dereferenceable).
0203   /// If the inversion will consume instructions, `DoesConsume` will be set to
0204   /// true. Otherwise it will be false.
  /// Defined out-of-line; \p Depth is the recursion depth of the search.
0205   Value *getFreelyInvertedImpl(Value *V, bool WillInvertAllUses,
0206                                       BuilderTy *Builder, bool &DoesConsume,
0207                                       unsigned Depth);
0208 
  /// Entry point for getFreelyInvertedImpl: resets \p DoesConsume and starts
  /// the search at depth 0. See the doc comment above for the contract.
0209   Value *getFreelyInverted(Value *V, bool WillInvertAllUses,
0210                                   BuilderTy *Builder, bool &DoesConsume) {
0211     DoesConsume = false;
0212     return getFreelyInvertedImpl(V, WillInvertAllUses, Builder, DoesConsume,
0213                                  /*Depth*/ 0);
0214   }
0215 
  /// Overload that discards the DoesConsume out-parameter.
0216   Value *getFreelyInverted(Value *V, bool WillInvertAllUses,
0217                                   BuilderTy *Builder) {
0218     bool Unused;
0219     return getFreelyInverted(V, WillInvertAllUses, Builder, Unused);
0220   }
0221 
0222   /// Return true if the specified value is free to invert (apply ~ to).
0223   /// This happens in cases where the ~ can be eliminated.  If WillInvertAllUses
0224   /// is true, work under the assumption that the caller intends to remove all
0225   /// uses of V and only keep uses of ~V.
0226   ///
0227   /// See also: canFreelyInvertAllUsersOf()
0228   bool isFreeToInvert(Value *V, bool WillInvertAllUses,
0229                              bool &DoesConsume) {
0230     return getFreelyInverted(V, WillInvertAllUses, /*Builder*/ nullptr,
0231                              DoesConsume) != nullptr;
0232   }
0233 
  /// Overload that discards the DoesConsume out-parameter.
0234   bool isFreeToInvert(Value *V, bool WillInvertAllUses) {
0235     bool Unused;
0236     return isFreeToInvert(V, WillInvertAllUses, Unused);
0237   }
0238 
0239   /// Given i1 V, can every user of V be freely adapted if V is changed to !V ?
0240   /// InstCombine's freelyInvertAllUsersOf() must be kept in sync with this fn.
0241   /// NOTE: for Instructions only!
0242   ///
0243   /// See also: isFreeToInvert()
0244   bool canFreelyInvertAllUsersOf(Instruction *V, Value *IgnoredUser) {
0245     // Look at every user of V.
0246     for (Use &U : V->uses()) {
0247       if (U.getUser() == IgnoredUser)
0248         continue; // Don't consider this user.
0249 
0250       auto *I = cast<Instruction>(U.getUser());
0251       switch (I->getOpcode()) {
0252       case Instruction::Select:
0253         if (U.getOperandNo() != 0) // Only if the value is used as select cond.
0254           return false;
0255         if (shouldAvoidAbsorbingNotIntoSelect(*cast<SelectInst>(I)))
0256           return false;
0257         break;
0258       case Instruction::Br:
0259         assert(U.getOperandNo() == 0 && "Must be branching on that value.");
0260         break; // Free to invert by swapping true/false values/destinations.
0261       case Instruction::Xor: // Can invert 'xor' if it's a 'not', by ignoring
0262                              // it.
0263         if (!match(I, m_Not(PatternMatch::m_Value())))
0264           return false; // Not a 'not'.
0265         break;
0266       default:
0267         return false; // Don't know, likely not freely invertible.
0268       }
0269       // So far all users were free to invert...
0270     }
0271     return true; // Can freely invert all users!
0272   }
0273 
0274   /// Some binary operators require special handling to avoid poison and
0275   /// undefined behavior. If a constant vector has undef elements, replace those
0276   /// undefs with identity constants if possible because those are always safe
0277   /// to execute. If no identity constant exists, replace undef with some other
0278   /// safe constant.
  /// \p IsRHSConstant selects which operand position \p In occupies; \p In must
  /// be a fixed-width vector constant (enforced by the cast below).
0279   static Constant *
0280   getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In,
0281                                 bool IsRHSConstant) {
0282     auto *InVTy = cast<FixedVectorType>(In->getType());
0283 
0284     Type *EltTy = InVTy->getElementType();
0285     auto *SafeC = ConstantExpr::getBinOpIdentity(Opcode, EltTy, IsRHSConstant);
0286     if (!SafeC) {
0287       // TODO: Should this be available as a constant utility function? It is
0288       // similar to getBinOpAbsorber().
0289       if (IsRHSConstant) {
0290         switch (Opcode) {
0291         case Instruction::SRem: // X % 1 = 0
0292         case Instruction::URem: // X %u 1 = 0
0293           SafeC = ConstantInt::get(EltTy, 1);
0294           break;
0295         case Instruction::FRem: // X % 1.0 (doesn't simplify, but it is safe)
0296           SafeC = ConstantFP::get(EltTy, 1.0);
0297           break;
0298         default:
0299           llvm_unreachable(
0300               "Only rem opcodes have no identity constant for RHS");
0301         }
0302       } else {
0303         switch (Opcode) {
0304         case Instruction::Shl:  // 0 << X = 0
0305         case Instruction::LShr: // 0 >>u X = 0
0306         case Instruction::AShr: // 0 >> X = 0
0307         case Instruction::SDiv: // 0 / X = 0
0308         case Instruction::UDiv: // 0 /u X = 0
0309         case Instruction::SRem: // 0 % X = 0
0310         case Instruction::URem: // 0 %u X = 0
0311         case Instruction::Sub:  // 0 - X (doesn't simplify, but it is safe)
0312         case Instruction::FSub: // 0.0 - X (doesn't simplify, but it is safe)
0313         case Instruction::FDiv: // 0.0 / X (doesn't simplify, but it is safe)
0314         case Instruction::FRem: // 0.0 % X = 0
0315           SafeC = Constant::getNullValue(EltTy);
0316           break;
0317         default:
0318           llvm_unreachable("Expected to find identity constant for opcode");
0319         }
0320       }
0321     }
0322     assert(SafeC && "Must have safe constant for binop");
0323     unsigned NumElts = InVTy->getNumElements();
0324     SmallVector<Constant *, 16> Out(NumElts);
0325     for (unsigned i = 0; i != NumElts; ++i) {
0326       Constant *C = In->getAggregateElement(i);
0327       Out[i] = isa<UndefValue>(C) ? SafeC : C;
0328     }
0329     return ConstantVector::get(Out);
0330   }
0331 
  /// Push \p I onto the worklist of instructions to (re)visit.
0332   void addToWorklist(Instruction *I) { Worklist.push(I); }
0333 
  // Accessors for the analyses supplied at construction time.
0334   AssumptionCache &getAssumptionCache() const { return AC; }
0335   TargetLibraryInfo &getTargetLibraryInfo() const { return TLI; }
0336   DominatorTree &getDominatorTree() const { return DT; }
0337   const DataLayout &getDataLayout() const { return DL; }
0338   const SimplifyQuery &getSimplifyQuery() const { return SQ; }
0339   OptimizationRemarkEmitter &getOptimizationRemarkEmitter() const {
0340     return ORE;
0341   }
0342   BlockFrequencyInfo *getBlockFrequencyInfo() const { return BFI; }
0343   ProfileSummaryInfo *getProfileSummaryInfo() const { return PSI; }
0344 
0345   // Call target specific combiners
  // (declared here, defined out-of-line; these are the only sanctioned users
  // of TTIForTargetIntrinsicsOnly — see the comment on that member).
0346   std::optional<Instruction *> targetInstCombineIntrinsic(IntrinsicInst &II);
0347   std::optional<Value *>
0348   targetSimplifyDemandedUseBitsIntrinsic(IntrinsicInst &II, APInt DemandedMask,
0349                                          KnownBits &Known,
0350                                          bool &KnownBitsComputed);
0351   std::optional<Value *> targetSimplifyDemandedVectorEltsIntrinsic(
0352       IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
0353       APInt &UndefElts2, APInt &UndefElts3,
0354       std::function<void(Instruction *, unsigned, APInt, APInt &)>
0355           SimplifyAndSetOp);
0356 
  /// Populate the BackEdges set (defined out-of-line).
0357   void computeBackEdges();
  /// Return true if (\p From, \p To) is a recorded back-edge, computing the
  /// back-edge set lazily on first query.
0358   bool isBackEdge(const BasicBlock *From, const BasicBlock *To) {
0359     if (!ComputedBackEdges)
0360       computeBackEdges();
0361     return BackEdges.contains({From, To});
0362   }
0363 
0364   /// Inserts an instruction \p New before instruction \p Old
0365   ///
0366   /// Also adds the new instruction to the worklist and returns \p New so that
0367   /// it is suitable for use as the return from the visitation patterns.
0368   Instruction *InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old) {
0369     assert(New && !New->getParent() &&
0370            "New instruction already inserted into a basic block!");
0371     New->insertBefore(Old); // Insert inst
0372     Worklist.add(New);
0373     return New;
0374   }
0375 
0376   /// Same as InsertNewInstBefore, but also sets the debug loc.
0377   Instruction *InsertNewInstWith(Instruction *New, BasicBlock::iterator Old) {
0378     New->setDebugLoc(Old->getDebugLoc());
0379     return InsertNewInstBefore(New, Old);
0380   }
0381 
0382   /// A combiner-aware RAUW-like routine.
0383   ///
0384   /// This method is to be used when an instruction is found to be dead,
0385   /// replaceable with another preexisting expression. Here we add all uses of
0386   /// I to the worklist, replace all uses of I with the new value, then return
0387   /// I, so that the inst combiner will know that I was modified.
  ///
  /// Returns nullptr (meaning "no change") when \p I has no uses.
0388   Instruction *replaceInstUsesWith(Instruction &I, Value *V) {
0389     // If there are no uses to replace, then we return nullptr to indicate that
0390     // no changes were made to the program.
0391     if (I.use_empty()) return nullptr;
0392 
0393     Worklist.pushUsersToWorkList(I); // Add all modified instrs to worklist.
0394 
0395     // If we are replacing the instruction with itself, this must be in a
0396     // segment of unreachable code, so just clobber the instruction.
0397     if (&I == V)
0398       V = PoisonValue::get(I.getType());
0399 
0400     LLVM_DEBUG(dbgs() << "IC: Replacing " << I << "\n"
0401                       << "    with " << *V << '\n');
0402 
0403     // If V is a new unnamed instruction, take the name from the old one.
0404     if (V->use_empty() && isa<Instruction>(V) && !V->hasName() && I.hasName())
0405       V->takeName(&I);
0406 
0407     I.replaceAllUsesWith(V);
0408     return &I;
0409   }
0410 
0411   /// Replace operand of instruction and add old operand to the worklist.
  /// Returns \p I so callers can use it as a visitor return value.
0412   Instruction *replaceOperand(Instruction &I, unsigned OpNum, Value *V) {
0413     Value *OldOp = I.getOperand(OpNum);
0414     I.setOperand(OpNum, V);
0415     Worklist.handleUseCountDecrement(OldOp);
0416     return &I;
0417   }
0418 
0419   /// Replace use and add the previously used value to the worklist.
0420   void replaceUse(Use &U, Value *NewValue) {
0421     Value *OldOp = U;
0422     U = NewValue;
0423     Worklist.handleUseCountDecrement(OldOp);
0424   }
0425 
0426   /// Combiner aware instruction erasure.
0427   ///
0428   /// When dealing with an instruction that has side effects or produces a void
0429   /// value, we can't rely on DCE to delete the instruction. Instead, visit
0430   /// methods should return the value returned by this function.
0431   virtual Instruction *eraseInstFromFunction(Instruction &I) = 0;
0432 
  // Convenience wrappers around ValueTracking queries: each forwards to the
  // corresponding llvm:: free function, threading this pass's analyses through
  // (via SQ.getWithInstruction(CxtI) or the DL/AC/DT members directly).
0433   void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
0434                         const Instruction *CxtI) const {
0435     llvm::computeKnownBits(V, Known, Depth, SQ.getWithInstruction(CxtI));
0436   }
0437 
0438   KnownBits computeKnownBits(const Value *V, unsigned Depth,
0439                              const Instruction *CxtI) const {
0440     return llvm::computeKnownBits(V, Depth, SQ.getWithInstruction(CxtI));
0441   }
0442 
0443   bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero = false,
0444                               unsigned Depth = 0,
0445                               const Instruction *CxtI = nullptr) {
0446     return llvm::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
0447                                         SQ.getWithInstruction(CxtI));
0448   }
0449 
0450   bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth = 0,
0451                          const Instruction *CxtI = nullptr) const {
0452     return llvm::MaskedValueIsZero(V, Mask, SQ.getWithInstruction(CxtI), Depth);
0453   }
0454 
0455   unsigned ComputeNumSignBits(const Value *Op, unsigned Depth = 0,
0456                               const Instruction *CxtI = nullptr) const {
0457     return llvm::ComputeNumSignBits(Op, DL, Depth, &AC, CxtI, &DT);
0458   }
0459 
0460   unsigned ComputeMaxSignificantBits(const Value *Op, unsigned Depth = 0,
0461                                      const Instruction *CxtI = nullptr) const {
0462     return llvm::ComputeMaxSignificantBits(Op, DL, Depth, &AC, CxtI, &DT);
0463   }
0464 
  // Overflow-detection wrappers, likewise forwarding to llvm:: helpers with
  // this pass's SimplifyQuery.
0465   OverflowResult computeOverflowForUnsignedMul(const Value *LHS,
0466                                                const Value *RHS,
0467                                                const Instruction *CxtI,
0468                                                bool IsNSW = false) const {
0469     return llvm::computeOverflowForUnsignedMul(
0470         LHS, RHS, SQ.getWithInstruction(CxtI), IsNSW);
0471   }
0472 
0473   OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
0474                                              const Instruction *CxtI) const {
0475     return llvm::computeOverflowForSignedMul(LHS, RHS,
0476                                              SQ.getWithInstruction(CxtI));
0477   }
0478 
0479   OverflowResult
0480   computeOverflowForUnsignedAdd(const WithCache<const Value *> &LHS,
0481                                 const WithCache<const Value *> &RHS,
0482                                 const Instruction *CxtI) const {
0483     return llvm::computeOverflowForUnsignedAdd(LHS, RHS,
0484                                                SQ.getWithInstruction(CxtI));
0485   }
0486 
0487   OverflowResult
0488   computeOverflowForSignedAdd(const WithCache<const Value *> &LHS,
0489                               const WithCache<const Value *> &RHS,
0490                               const Instruction *CxtI) const {
0491     return llvm::computeOverflowForSignedAdd(LHS, RHS,
0492                                              SQ.getWithInstruction(CxtI));
0493   }
0494 
0495   OverflowResult computeOverflowForUnsignedSub(const Value *LHS,
0496                                                const Value *RHS,
0497                                                const Instruction *CxtI) const {
0498     return llvm::computeOverflowForUnsignedSub(LHS, RHS,
0499                                                SQ.getWithInstruction(CxtI));
0500   }
0501 
0502   OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS,
0503                                              const Instruction *CxtI) const {
0504     return llvm::computeOverflowForSignedSub(LHS, RHS,
0505                                              SQ.getWithInstruction(CxtI));
0506   }
0507 
  /// Attempt to simplify operand \p OpNo of \p I based on \p DemandedMask,
  /// updating \p Known. Implemented by the derived visitor class (see
  /// InstCombineInternal.h, per the file header).
0508   virtual bool SimplifyDemandedBits(Instruction *I, unsigned OpNo,
0509                                     const APInt &DemandedMask, KnownBits &Known,
0510                                     unsigned Depth, const SimplifyQuery &Q) = 0;
0511 
  /// Convenience overload starting at depth 0 with a query contextualized at
  /// \p I.
0512   bool SimplifyDemandedBits(Instruction *I, unsigned OpNo,
0513                             const APInt &DemandedMask, KnownBits &Known) {
0514     return SimplifyDemandedBits(I, OpNo, DemandedMask, Known,
0515                                 /*Depth=*/0, SQ.getWithInstruction(I));
0516   }
0517 
  /// Vector-element analogue of SimplifyDemandedBits; implemented by the
  /// derived visitor class.
0518   virtual Value *
0519   SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &UndefElts,
0520                              unsigned Depth = 0,
0521                              bool AllowMultipleUsers = false) = 0;
0522 
  /// Return true if an address space cast from \p FromAS to \p ToAS is valid
  /// (defined out-of-line).
0523   bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const;
0524 };
0525 
0526 } // namespace llvm
0527 
0528 #undef DEBUG_TYPE
0529 
0530 #endif