File indexing completed on 2026-05-10 08:44:38
0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018 #ifndef LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINER_H
0019 #define LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINER_H
0020
0021 #include "llvm/ADT/PostOrderIterator.h"
0022 #include "llvm/Analysis/DomConditionCache.h"
0023 #include "llvm/Analysis/InstructionSimplify.h"
0024 #include "llvm/Analysis/TargetFolder.h"
0025 #include "llvm/Analysis/ValueTracking.h"
0026 #include "llvm/IR/IRBuilder.h"
0027 #include "llvm/IR/PatternMatch.h"
0028 #include "llvm/Support/Debug.h"
0029 #include "llvm/Support/KnownBits.h"
0030 #include <cassert>
0031
0032 #define DEBUG_TYPE "instcombine"
0033 #include "llvm/Transforms/Utils/InstructionWorklist.h"
0034
0035 namespace llvm {
0036
0037 class AAResults;
0038 class AssumptionCache;
0039 class OptimizationRemarkEmitter;
0040 class ProfileSummaryInfo;
0041 class TargetLibraryInfo;
0042 class TargetTransformInfo;
0043
0044
0045
0046
0047
/// The core instruction combiner logic.
///
/// This is an abstract base class: the target-intrinsic hooks and the
/// IR-mutating pieces (eraseInstFromFunction, SimplifyDemandedBits,
/// SimplifyDemandedVectorElts) are pure virtual and supplied by a derived
/// class. Everything here is target-independent canonicalization support.
class LLVM_LIBRARY_VISIBILITY InstCombiner {
  /// Only used to call target-specific intrinsic combining hooks.
  /// It must NOT be used for any other purpose, as InstCombine is a
  /// target-independent canonicalization transform.
  TargetTransformInfo &TTIForTargetIntrinsicsOnly;

public:
  /// Maximum size of an array considered when transforming.
  uint64_t MaxArraySizeForCombine = 0;

  /// An IRBuilder that constant-folds through TargetFolder and notifies the
  /// combiner of newly inserted instructions via IRBuilderCallbackInserter.
  using BuilderTy = IRBuilder<TargetFolder, IRBuilderCallbackInserter>;
  BuilderTy &Builder;

protected:
  /// A worklist of the instructions that need to be simplified.
  InstructionWorklist &Worklist;

  /// True if the caller asked to optimize for size; fixed at construction.
  const bool MinimizeSize;

  /// Alias analysis; may be null.
  AAResults *AA;

  // Required analyses.
  AssumptionCache &AC;
  TargetLibraryInfo &TLI;
  DominatorTree &DT;
  const DataLayout &DL;
  SimplifyQuery SQ;
  // Optional analyses (pointers below may be null).
  OptimizationRemarkEmitter &ORE;
  BlockFrequencyInfo *BFI;
  BranchProbabilityInfo *BPI;
  ProfileSummaryInfo *PSI;
  DomConditionCache DC;

  /// Reverse-post-order traversal of the function, shared with the driver.
  ReversePostOrderTraversal<BasicBlock *> &RPOT;

  /// Set to true whenever the combiner modifies the IR.
  bool MadeIRChange = false;

  /// Edges that are known to never be taken.
  SmallDenseSet<std::pair<BasicBlock *, BasicBlock *>, 8> DeadEdges;

  /// Per-block predecessor ordering. NOTE(review): presumably used to
  /// canonicalize phi-operand order — the consumers are not visible here;
  /// confirm against the .cpp.
  SmallDenseMap<BasicBlock *, SmallVector<BasicBlock *>, 8> PredOrder;

  /// The set of CFG back edges, filled lazily by computeBackEdges() the
  /// first time isBackEdge() is called (see ComputedBackEdges).
  SmallDenseSet<std::pair<const BasicBlock *, const BasicBlock *>, 8> BackEdges;
  /// Whether BackEdges has been populated yet.
  bool ComputedBackEdges = false;

public:
  InstCombiner(InstructionWorklist &Worklist, BuilderTy &Builder,
               bool MinimizeSize, AAResults *AA, AssumptionCache &AC,
               TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
               DominatorTree &DT, OptimizationRemarkEmitter &ORE,
               BlockFrequencyInfo *BFI, BranchProbabilityInfo *BPI,
               ProfileSummaryInfo *PSI, const DataLayout &DL,
               ReversePostOrderTraversal<BasicBlock *> &RPOT)
      : TTIForTargetIntrinsicsOnly(TTI), Builder(Builder), Worklist(Worklist),
        MinimizeSize(MinimizeSize), AA(AA), AC(AC), TLI(TLI), DT(DT), DL(DL),
        // NOTE(review): the two bools are assumed to be SimplifyQuery's
        // UseInstrInfo / CanUseUndef flags — confirm against the
        // SimplifyQuery constructor signature.
        SQ(DL, &TLI, &DT, &AC, nullptr, true,
           true, &DC),
        ORE(ORE), BFI(BFI), BPI(BPI), PSI(PSI), RPOT(RPOT) {}

  virtual ~InstCombiner() = default;

  /// If \p V is a bitcast instruction (optionally restricted to one-use
  /// bitcasts), return its source operand; otherwise return \p V itself.
  static Value *peekThroughBitcast(Value *V, bool OneUseOnly = false) {
    if (auto *BitCast = dyn_cast<BitCastInst>(V))
      if (!OneUseOnly || BitCast->hasOneUse())
        return BitCast->getOperand(0);

    // V is not a bitcast (or is a multi-use bitcast when one-use was asked).
    return V;
  }

  /// Assign a complexity (rank) value to \p V, used to canonicalize operand
  /// order of compares and commutative instructions (higher complexity goes
  /// first). The ranking is:
  ///   0 -> undef
  ///   1 -> other constants
  ///   2 -> cast, (f)neg, or not
  ///   3 -> everything else
  static unsigned getComplexity(Value *V) {
    if (isa<Constant>(V))
      return isa<UndefValue>(V) ? 0 : 1;

    if (isa<CastInst>(V) || match(V, m_Neg(PatternMatch::m_Value())) ||
        match(V, m_Not(PatternMatch::m_Value())) ||
        match(V, m_FNeg(PatternMatch::m_Value())))
      return 2;

    return 3;
  }

  /// Return true if \p Pred is one of the predicates InstCombine
  /// canonicalizes towards. The listed predicates (ne/ule/sle/uge/sge and
  /// the corresponding ordered FP forms) are the non-canonical ones and
  /// yield false; everything else is considered canonical.
  static bool isCanonicalPredicate(CmpPredicate Pred) {
    switch (Pred) {
    case CmpInst::ICMP_NE:
    case CmpInst::ICMP_ULE:
    case CmpInst::ICMP_SLE:
    case CmpInst::ICMP_UGE:
    case CmpInst::ICMP_SGE:
    // TODO: There are 16 FCMP predicates. Should others be (not) canonical?
    case CmpInst::FCMP_ONE:
    case CmpInst::FCMP_OLE:
    case CmpInst::FCMP_OGE:
      return false;
    default:
      return true;
    }
  }

  /// Add one to a Constant.
  static Constant *AddOne(Constant *C) {
    return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
  }

  /// Subtract one from a Constant.
  static Constant *SubOne(Constant *C) {
    return ConstantExpr::getSub(C, ConstantInt::get(C->getType(), 1));
  }

  /// Return true if \p SI is a select in the poison-safe logical and/or
  /// form (select c, x, false / select c, true, x); absorbing a 'not' into
  /// such a select would break the poison-blocking property.
  static bool shouldAvoidAbsorbingNotIntoSelect(const SelectInst &SI) {
    // A select whose two arms match m_LogicalAnd / m_LogicalOr is the
    // poison-safe form of and/or; recognize it structurally rather than by
    // operand values.
    return match(&SI, PatternMatch::m_LogicalAnd(PatternMatch::m_Value(),
                                                 PatternMatch::m_Value())) ||
           match(&SI, PatternMatch::m_LogicalOr(PatternMatch::m_Value(),
                                                PatternMatch::m_Value()));
  }

  /// Implementation detail of getFreelyInverted: attempt to build (or, with
  /// a null \p Builder, merely prove buildable) the inversion of \p V up to
  /// a recursion limit \p Depth. \p DoesConsume is set when the inversion
  /// consumes an existing 'not'.
  Value *getFreelyInvertedImpl(Value *V, bool WillInvertAllUses,
                               BuilderTy *Builder, bool &DoesConsume,
                               unsigned Depth);

  /// Return the inverted form of \p V if it can be obtained for free
  /// (nullptr otherwise). \p DoesConsume reports whether an existing 'not'
  /// was consumed in the process.
  Value *getFreelyInverted(Value *V, bool WillInvertAllUses,
                           BuilderTy *Builder, bool &DoesConsume) {
    DoesConsume = false;
    return getFreelyInvertedImpl(V, WillInvertAllUses, Builder, DoesConsume,
                                 /*Depth=*/0);
  }

  /// Overload of the above for callers that don't care whether a 'not' was
  /// consumed.
  Value *getFreelyInverted(Value *V, bool WillInvertAllUses,
                           BuilderTy *Builder) {
    bool Unused;
    return getFreelyInverted(V, WillInvertAllUses, Builder, Unused);
  }

  /// Return true if \p V could be inverted at no cost. This only *checks*
  /// (passes a null builder); it creates no instructions.
  /// \p DoesConsume reports whether the inversion would consume an existing
  /// 'not'.
  bool isFreeToInvert(Value *V, bool WillInvertAllUses,
                      bool &DoesConsume) {
    return getFreelyInverted(V, WillInvertAllUses, /*Builder=*/nullptr,
                             DoesConsume) != nullptr;
  }

  /// As above, ignoring the DoesConsume result.
  bool isFreeToInvert(Value *V, bool WillInvertAllUses) {
    bool Unused;
    return isFreeToInvert(V, WillInvertAllUses, Unused);
  }

  /// Given i1 \p V, can every user of \p V (except \p IgnoredUser) be freely
  /// adapted if \p V is changed to !V? The supported users are: the
  /// condition operand of a select (unless it is a poison-safe logical
  /// and/or, see shouldAvoidAbsorbingNotIntoSelect), a conditional branch,
  /// and a 'not' (xor with all-ones).
  bool canFreelyInvertAllUsersOf(Instruction *V, Value *IgnoredUser) {
    // Check every use other than the one we were told to ignore.
    for (Use &U : V->uses()) {
      if (U.getUser() == IgnoredUser)
        continue; // Don't consider this user.

      auto *I = cast<Instruction>(U.getUser());
      switch (I->getOpcode()) {
      case Instruction::Select:
        // Only the condition (operand 0) of a select can absorb the flip.
        if (U.getOperandNo() != 0)
          return false;
        if (shouldAvoidAbsorbingNotIntoSelect(*cast<SelectInst>(I)))
          return false;
        break;
      case Instruction::Br:
        assert(U.getOperandNo() == 0 && "Must be branching on that value.");
        break; // Free to invert by swapping the successors.
      case Instruction::Xor:
        // Can invert if this xor is a 'not'.
        if (!match(I, m_Not(PatternMatch::m_Value())))
          return false;
        break;
      default:
        return false; // Don't know, likely not freely invertible.
      }
      // So far so good; keep scanning the remaining uses.
    }
    return true;
  }

  /// Some binary operators require special handling to avoid poison and
  /// undefined behavior. If a constant vector \p In has undef elements,
  /// replace those undefs with a safe constant: the identity constant of
  /// \p Opcode when one exists, otherwise a value that makes the op a no-op
  /// or defined (1 for rem RHS, 0 for the listed non-commutative LHS cases).
  static Constant *
  getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In,
                                bool IsRHSConstant) {
    auto *InVTy = cast<FixedVectorType>(In->getType());

    Type *EltTy = InVTy->getElementType();
    auto *SafeC = ConstantExpr::getBinOpIdentity(Opcode, EltTy, IsRHSConstant);
    if (!SafeC) {
      // TODO: Should this be available as a constant utility function? It is
      // similar to getBinOpAbsorber().
      if (IsRHSConstant) {
        switch (Opcode) {
        case Instruction::SRem: // X % 1 = 0
        case Instruction::URem: // X %u 1 = 0
          SafeC = ConstantInt::get(EltTy, 1);
          break;
        case Instruction::FRem: // X % 1 = X % 1 == 0 (exact value irrelevant)
          SafeC = ConstantFP::get(EltTy, 1.0);
          break;
        default:
          llvm_unreachable(
              "Only rem opcodes have no identity constant for RHS");
        }
      } else {
        switch (Opcode) {
        case Instruction::Shl:  // 0 << X = 0
        case Instruction::LShr: // 0 >>u X = 0
        case Instruction::AShr: // 0 >> X = 0
        case Instruction::SDiv: // 0 / X = 0
        case Instruction::UDiv: // 0 /u X = 0
        case Instruction::SRem: // 0 % X = 0
        case Instruction::URem: // 0 %u X = 0
        case Instruction::Sub:  // 0 - X (doesn't simplify, but it is safe)
        case Instruction::FSub: // 0.0 - X (doesn't simplify, but it is safe)
        case Instruction::FDiv: // 0.0 / X (doesn't simplify, but it is safe)
        case Instruction::FRem: // 0.0 % X = 0
          SafeC = Constant::getNullValue(EltTy);
          break;
        default:
          llvm_unreachable("Expected to find identity constant for opcode");
        }
      }
    }
    assert(SafeC && "Must have safe constant for binop");
    unsigned NumElts = InVTy->getNumElements();
    SmallVector<Constant *, 16> Out(NumElts);
    for (unsigned i = 0; i != NumElts; ++i) {
      Constant *C = In->getAggregateElement(i);
      Out[i] = isa<UndefValue>(C) ? SafeC : C;
    }
    return ConstantVector::get(Out);
  }

  /// Push \p I onto the combiner's worklist.
  void addToWorklist(Instruction *I) { Worklist.push(I); }

  // Accessors for the analyses this combiner was constructed with.
  AssumptionCache &getAssumptionCache() const { return AC; }
  TargetLibraryInfo &getTargetLibraryInfo() const { return TLI; }
  DominatorTree &getDominatorTree() const { return DT; }
  const DataLayout &getDataLayout() const { return DL; }
  const SimplifyQuery &getSimplifyQuery() const { return SQ; }
  OptimizationRemarkEmitter &getOptimizationRemarkEmitter() const {
    return ORE;
  }
  BlockFrequencyInfo *getBlockFrequencyInfo() const { return BFI; }
  ProfileSummaryInfo *getProfileSummaryInfo() const { return PSI; }

  // Call target-specific combiners for intrinsics, forwarding to
  // TTIForTargetIntrinsicsOnly. std::nullopt means "target had no opinion".
  std::optional<Instruction *> targetInstCombineIntrinsic(IntrinsicInst &II);
  std::optional<Value *>
  targetSimplifyDemandedUseBitsIntrinsic(IntrinsicInst &II, APInt DemandedMask,
                                         KnownBits &Known,
                                         bool &KnownBitsComputed);
  std::optional<Value *> targetSimplifyDemandedVectorEltsIntrinsic(
      IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
      APInt &UndefElts2, APInt &UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp);

  /// Populate BackEdges (defined out of line).
  void computeBackEdges();
  /// Return true if the CFG edge From->To is a back edge, computing the
  /// back-edge set lazily on first use.
  bool isBackEdge(const BasicBlock *From, const BasicBlock *To) {
    if (!ComputedBackEdges)
      computeBackEdges();
    return BackEdges.contains({From, To});
  }

  /// Insert a not-yet-inserted instruction \p New before the instruction at
  /// \p Old and add it to the worklist, so it gets revisited by the
  /// combiner. Returns \p New for chaining.
  Instruction *InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old) {
    assert(New && !New->getParent() &&
           "New instruction already inserted into a basic block!");
    New->insertBefore(Old);
    Worklist.add(New);
    return New;
  }

  /// Same as InsertNewInstBefore, but also copies the debug location of the
  /// old instruction onto the new one.
  Instruction *InsertNewInstWith(Instruction *New, BasicBlock::iterator Old) {
    New->setDebugLoc(Old->getDebugLoc());
    return InsertNewInstBefore(New, Old);
  }

  /// A combiner-aware RAUW-like routine: replace all uses of \p I with
  /// \p V and push I's users onto the worklist so they get revisited.
  /// Returns &I (as the "changed instruction" result for visit methods), or
  /// nullptr when I has no uses and nothing was done. The caller is
  /// expected to erase I afterwards.
  Instruction *replaceInstUsesWith(Instruction &I, Value *V) {
    // If there are no uses to rewrite, signal "no change" so the instruction
    // isn't requeued.
    if (I.use_empty()) return nullptr;

    Worklist.pushUsersToWorkList(I); // Add all modified instrs to worklist.

    // If we are replacing the instruction with itself, this must be in a
    // segment of unreachable code, so just clobber the instruction with
    // poison instead.
    if (&I == V)
      V = PoisonValue::get(I.getType());

    LLVM_DEBUG(dbgs() << "IC: Replacing " << I << "\n"
                      << "    with " << *V << '\n');

    // If V is a new unnamed instruction, take over I's name to keep the IR
    // readable.
    if (V->use_empty() && isa<Instruction>(V) && !V->hasName() && I.hasName())
      V->takeName(&I);

    I.replaceAllUsesWith(V);
    return &I;
  }

  /// Replace operand \p OpNum of \p I with \p V, notifying the worklist that
  /// the old operand lost a use. Returns &I so visit methods can signal a
  /// change.
  Instruction *replaceOperand(Instruction &I, unsigned OpNum, Value *V) {
    Value *OldOp = I.getOperand(OpNum);
    I.setOperand(OpNum, V);
    Worklist.handleUseCountDecrement(OldOp);
    return &I;
  }

  /// Replace use \p U with \p NewValue, notifying the worklist that the
  /// previously used value lost a use.
  void replaceUse(Use &U, Value *NewValue) {
    Value *OldOp = U;
    U = NewValue;
    Worklist.handleUseCountDecrement(OldOp);
  }

  /// Combiner-aware instruction erasure: implemented by the derived class,
  /// which must also keep the worklist consistent (no dangling references to
  /// the erased instruction).
  virtual Instruction *eraseInstFromFunction(Instruction &I) = 0;

  // Thin wrappers over the llvm:: analysis entry points that thread in this
  // combiner's cached SimplifyQuery / analyses and the given context
  // instruction.
  void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                        const Instruction *CxtI) const {
    llvm::computeKnownBits(V, Known, Depth, SQ.getWithInstruction(CxtI));
  }

  KnownBits computeKnownBits(const Value *V, unsigned Depth,
                             const Instruction *CxtI) const {
    return llvm::computeKnownBits(V, Depth, SQ.getWithInstruction(CxtI));
  }

  bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero = false,
                              unsigned Depth = 0,
                              const Instruction *CxtI = nullptr) {
    return llvm::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
                                        SQ.getWithInstruction(CxtI));
  }

  bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth = 0,
                         const Instruction *CxtI = nullptr) const {
    return llvm::MaskedValueIsZero(V, Mask, SQ.getWithInstruction(CxtI), Depth);
  }

  unsigned ComputeNumSignBits(const Value *Op, unsigned Depth = 0,
                              const Instruction *CxtI = nullptr) const {
    return llvm::ComputeNumSignBits(Op, DL, Depth, &AC, CxtI, &DT);
  }

  unsigned ComputeMaxSignificantBits(const Value *Op, unsigned Depth = 0,
                                     const Instruction *CxtI = nullptr) const {
    return llvm::ComputeMaxSignificantBits(Op, DL, Depth, &AC, CxtI, &DT);
  }

  // Overflow queries, likewise forwarding to the llvm:: helpers with this
  // combiner's SimplifyQuery contextualized at CxtI.
  OverflowResult computeOverflowForUnsignedMul(const Value *LHS,
                                               const Value *RHS,
                                               const Instruction *CxtI,
                                               bool IsNSW = false) const {
    return llvm::computeOverflowForUnsignedMul(
        LHS, RHS, SQ.getWithInstruction(CxtI), IsNSW);
  }

  OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
                                             const Instruction *CxtI) const {
    return llvm::computeOverflowForSignedMul(LHS, RHS,
                                             SQ.getWithInstruction(CxtI));
  }

  OverflowResult
  computeOverflowForUnsignedAdd(const WithCache<const Value *> &LHS,
                                const WithCache<const Value *> &RHS,
                                const Instruction *CxtI) const {
    return llvm::computeOverflowForUnsignedAdd(LHS, RHS,
                                               SQ.getWithInstruction(CxtI));
  }

  OverflowResult
  computeOverflowForSignedAdd(const WithCache<const Value *> &LHS,
                              const WithCache<const Value *> &RHS,
                              const Instruction *CxtI) const {
    return llvm::computeOverflowForSignedAdd(LHS, RHS,
                                             SQ.getWithInstruction(CxtI));
  }

  OverflowResult computeOverflowForUnsignedSub(const Value *LHS,
                                               const Value *RHS,
                                               const Instruction *CxtI) const {
    return llvm::computeOverflowForUnsignedSub(LHS, RHS,
                                               SQ.getWithInstruction(CxtI));
  }

  OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS,
                                             const Instruction *CxtI) const {
    return llvm::computeOverflowForSignedSub(LHS, RHS,
                                             SQ.getWithInstruction(CxtI));
  }

  /// Attempt to simplify operand \p OpNo of \p I given that only the bits in
  /// \p DemandedMask are consumed; implemented by the derived class.
  virtual bool SimplifyDemandedBits(Instruction *I, unsigned OpNo,
                                    const APInt &DemandedMask, KnownBits &Known,
                                    unsigned Depth, const SimplifyQuery &Q) = 0;

  /// Convenience overload starting at depth 0 with the query contextualized
  /// at \p I.
  bool SimplifyDemandedBits(Instruction *I, unsigned OpNo,
                            const APInt &DemandedMask, KnownBits &Known) {
    return SimplifyDemandedBits(I, OpNo, DemandedMask, Known,
                                /*Depth=*/0, SQ.getWithInstruction(I));
  }

  /// Attempt to simplify \p V given that only the vector elements in
  /// \p DemandedElts are consumed; implemented by the derived class.
  virtual Value *
  SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &UndefElts,
                             unsigned Depth = 0,
                             bool AllowMultipleUsers = false) = 0;

  /// Return true if an addrspacecast from \p FromAS to \p ToAS is valid
  /// (defined out of line).
  bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const;
};
0525
0526 }
0527
0528 #undef DEBUG_TYPE
0529
0530 #endif