//===- LowerTypeTests.h - type metadata lowering pass -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines parts of the type test lowering pass implementation that
// may be usefully unit tested.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_LOWERTYPETESTS_H
#define LLVM_TRANSFORMS_IPO_LOWERTYPETESTS_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/PassManager.h"
#include <cstdint>
#include <cstring>
#include <limits>
#include <set>
#include <vector>

namespace llvm {

class Function;
class Module;
class ModuleSummaryIndex;
class raw_ostream;

namespace lowertypetests {

struct BitSetInfo {
  // The indices of the set bits in the bitset.
  std::set<uint64_t> Bits;

  // The byte offset into the combined global represented by the bitset.
  uint64_t ByteOffset;

  // The size of the bitset in bits.
  uint64_t BitSize;

  // Log2 alignment of the bit set relative to the combined global.
  // For example, a log2 alignment of 3 means that bits in the bitset
  // represent addresses 8 bytes apart.
  unsigned AlignLog2;

  bool isSingleOffset() const {
    return Bits.size() == 1;
  }

  bool isAllOnes() const {
    return Bits.size() == BitSize;
  }

  bool containsGlobalOffset(uint64_t Offset) const;

  void print(raw_ostream &OS) const;
};

struct BitSetBuilder {
  SmallVector<uint64_t, 16> Offsets;
  uint64_t Min = std::numeric_limits<uint64_t>::max();
  uint64_t Max = 0;

  BitSetBuilder() = default;

  void addOffset(uint64_t Offset) {
    if (Min > Offset)
      Min = Offset;
    if (Max < Offset)
      Max = Offset;

    Offsets.push_back(Offset);
  }

  BitSetInfo build();
};

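// Illustrative sketch (not part of the original header): building a bit set
// from three hypothetical offsets. The resulting field values assume the
// offset-normalizing build() implemented in LowerTypeTests.cpp.
//
//   BitSetBuilder BSB;
//   for (uint64_t Offset : {0, 8, 16})
//     BSB.addOffset(Offset);
//   BitSetInfo BSI = BSB.build();
//   // All offsets are 8-byte aligned, so one bit covers every 8 bytes:
//   // BSI.ByteOffset == 0, BSI.AlignLog2 == 3, BSI.BitSize == 3,
//   // BSI.Bits == {0, 1, 2} and BSI.isAllOnes() is true.
//   BSI.containsGlobalOffset(8);  // true: byte offset 8 maps to bit 1
//   BSI.containsGlobalOffset(4);  // false: not aligned to 1 << AlignLog2
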
/// This class implements a layout algorithm for globals referenced by bit sets
/// that tries to keep members of small bit sets together. This can
/// significantly reduce bit set sizes in many cases.
///
/// It works by assembling fragments of layout from sets of referenced globals.
/// Each set of referenced globals causes the algorithm to create a new
/// fragment, which is assembled by appending each referenced global in the set
/// to the fragment. If a referenced global has already been referenced by a
/// fragment created earlier, we instead delete that fragment and append its
/// contents to the fragment we are assembling.
///
/// By starting with the smallest fragments, we minimize the size of the
/// fragments that are copied into larger fragments. This is most intuitively
/// thought about when considering the case where the globals are virtual tables
/// and the bit sets represent their derived classes: in a single inheritance
/// hierarchy, the optimum layout would involve a depth-first search of the
/// class hierarchy (and in fact the computed layout ends up looking a lot like
/// a DFS), but a naive DFS would not work well in the presence of multiple
/// inheritance. This aspect of the algorithm ends up fitting smaller
/// hierarchies inside larger ones where that would be beneficial.
///
/// For example, consider this class hierarchy:
///
/// A       B
///   \   / | \
///     C   D   E
///
/// We have five bit sets: bsA (A, C), bsB (B, C, D, E), bsC (C), bsD (D) and
/// bsE (E). If we laid out our objects by DFS traversing B followed by A, our
/// layout would be {B, C, D, E, A}. This is optimal for bsB as it needs to
/// cover only the 4 objects in its hierarchy, but not for bsA as it needs to
/// cover 5 objects, i.e. the entire layout. Our algorithm proceeds as follows:
///
/// Add bsC, fragments {{C}}
/// Add bsD, fragments {{C}, {D}}
/// Add bsE, fragments {{C}, {D}, {E}}
/// Add bsA, fragments {{A, C}, {D}, {E}}
/// Add bsB, fragments {{B, A, C, D, E}}
///
/// This layout is optimal for bsA, as it now only needs to cover two (i.e. 3
/// fewer) objects, at the cost of bsB needing to cover 1 more object.
///
/// The bit set lowering pass assigns an object index to each object that needs
/// to be laid out, and calls addFragment for each bit set passing the object
/// indices of its referenced globals. It then assembles a layout from the
/// computed layout in the Fragments field.
struct GlobalLayoutBuilder {
  /// The computed layout. Each element of this vector contains a fragment of
  /// layout (which may be empty) consisting of object indices.
  std::vector<std::vector<uint64_t>> Fragments;

  /// Mapping from object index to fragment index.
  std::vector<uint64_t> FragmentMap;

  GlobalLayoutBuilder(uint64_t NumObjects)
      : Fragments(1), FragmentMap(NumObjects) {}

  /// Add F to the layout while trying to keep its indices contiguous.
  /// If a previously seen fragment uses any of F's indices, that
  /// fragment will be laid out inside F.
  void addFragment(const std::set<uint64_t> &F);
};

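// Illustrative sketch (not part of the original header): replaying the worked
// example from the comment above with hypothetical object indices A=0, B=1,
// C=2, D=3, E=4.
//
//   GlobalLayoutBuilder GLB(/*NumObjects=*/5);
//   GLB.addFragment({2});          // bsC
//   GLB.addFragment({3});          // bsD
//   GLB.addFragment({4});          // bsE
//   GLB.addFragment({0, 2});       // bsA: absorbs the earlier {C} fragment
//   GLB.addFragment({1, 2, 3, 4}); // bsB: absorbs all remaining fragments
//   // Concatenating the non-empty entries of GLB.Fragments now yields the
//   // single fragment {B, A, C, D, E} described above.
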
/// This class is used to build a byte array containing overlapping bit sets. By
/// loading from indexed offsets into the byte array and applying a mask, a
/// program can test bits from the bit set with a relatively short instruction
/// sequence. For example, suppose we have 15 bit sets to lay out:
///
/// A (16 bits), B (15 bits), C (14 bits), D (13 bits), E (12 bits),
/// F (11 bits), G (10 bits), H (9 bits), I (7 bits), J (6 bits), K (5 bits),
/// L (4 bits), M (3 bits), N (2 bits), O (1 bit)
///
/// These bits can be laid out in a 16-byte array like this:
///
///       Byte Offset
///     0123456789ABCDEF
/// Bit
///   7 HHHHHHHHHIIIIIII
///   6 GGGGGGGGGGJJJJJJ
///   5 FFFFFFFFFFFKKKKK
///   4 EEEEEEEEEEEELLLL
///   3 DDDDDDDDDDDDDMMM
///   2 CCCCCCCCCCCCCCNN
///   1 BBBBBBBBBBBBBBBO
///   0 AAAAAAAAAAAAAAAA
///
/// For example, to test bit X of A, we evaluate ((bits[X] & 1) != 0), or to
/// test bit X of I, we evaluate ((bits[9 + X] & 0x80) != 0). This can be done
/// in 1-2 machine instructions on x86, or 4-6 instructions on ARM.
///
/// This is a byte array, rather than (say) a 2-byte array or a 4-byte array,
/// because for one thing it gives us better packing (the more bins there are,
/// the less evenly they will be filled), and for another, the instruction
/// sequences can be slightly shorter, both on x86 and ARM.
struct ByteArrayBuilder {
  /// The byte array built so far.
  std::vector<uint8_t> Bytes;

  enum { BitsPerByte = 8 };

  /// The number of bytes allocated so far for each of the bits.
  uint64_t BitAllocs[BitsPerByte];

  ByteArrayBuilder() {
    memset(BitAllocs, 0, sizeof(BitAllocs));
  }

  /// Allocate BitSize bits in the byte array where Bits contains the bits to
  /// set. AllocByteOffset is set to the offset within the byte array and
  /// AllocMask is set to the bitmask for those bits. This uses the LPT (Longest
  /// Processing Time) multiprocessor scheduling algorithm to lay out the bits
  /// efficiently; the pass allocates bit sets in decreasing size order.
  void allocate(const std::set<uint64_t> &Bits, uint64_t BitSize,
                uint64_t &AllocByteOffset, uint8_t &AllocMask);
};

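// Illustrative sketch (not part of the original header): allocating two bit
// sets. The LPT heuristic places each allocation in the bit position with the
// fewest bytes allocated so far, so the offsets and masks shown are one
// plausible outcome assumed for the example, not guaranteed values.
//
//   ByteArrayBuilder BAB;
//   uint64_t ByteOffset;
//   uint8_t Mask;
//   BAB.allocate({0, 1, 2, 3}, /*BitSize=*/4, ByteOffset, Mask);
//   // e.g. ByteOffset == 0, Mask == 1: the set occupies bit position 0 of
//   // bytes 0-3; test bit X with (Bytes[ByteOffset + X] & Mask) != 0.
//   BAB.allocate({0, 2}, /*BitSize=*/3, ByteOffset, Mask);
//   // The second set lands in the next-emptiest bit position, e.g. Mask == 2.
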
bool isJumpTableCanonical(Function *F);

/// Specifies how to drop type tests.
enum class DropTestKind {
  None,   /// Do not drop type tests (default).
  Assume, /// Drop only llvm.assumes using type test value.
  All,    /// Drop the type test and all uses.
};

} // end namespace lowertypetests

class LowerTypeTestsPass : public PassInfoMixin<LowerTypeTestsPass> {
  bool UseCommandLine = false;

  ModuleSummaryIndex *ExportSummary = nullptr;
  const ModuleSummaryIndex *ImportSummary = nullptr;
  lowertypetests::DropTestKind DropTypeTests =
      lowertypetests::DropTestKind::None;

public:
  LowerTypeTestsPass() : UseCommandLine(true) {}
  LowerTypeTestsPass(ModuleSummaryIndex *ExportSummary,
                     const ModuleSummaryIndex *ImportSummary,
                     lowertypetests::DropTestKind DropTypeTests =
                         lowertypetests::DropTestKind::None)
      : ExportSummary(ExportSummary), ImportSummary(ImportSummary),
        DropTypeTests(DropTypeTests) {}
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

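// Illustrative sketch (not part of the original header): adding the pass to a
// new-pass-manager pipeline for a regular (non-LTO) run. The null summaries
// and the pre-built Module M and ModuleAnalysisManager MAM are assumptions
// made for the example.
//
//   ModulePassManager MPM;
//   MPM.addPass(LowerTypeTestsPass(/*ExportSummary=*/nullptr,
//                                  /*ImportSummary=*/nullptr));
//   PreservedAnalyses PA = MPM.run(M, MAM);
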
} // end namespace llvm

#endif // LLVM_TRANSFORMS_IPO_LOWERTYPETESTS_H