0001 //===- IntrinsicsAMDGPU.td - Defines AMDGPU intrinsics -----*- tablegen -*-===//
0002 //
0003 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
0004 // See https://llvm.org/LICENSE.txt for license information.
0005 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
0006 //
0007 //===----------------------------------------------------------------------===//
0008 //
0009 // This file defines all of the AMDGPU-specific intrinsics.
0010 //
0011 //===----------------------------------------------------------------------===//
0012 
0013 def global_ptr_ty : LLVMQualPointerType<1>;
0014 def local_ptr_ty : LLVMQualPointerType<3>;
0015 
0016 // The amdgpu-no-* attributes (ex amdgpu-no-workitem-id-z) typically inferred
0017 // by the backend cause whole-program undefined behavior when violated, such as
0018 // by causing all other preload register intrinsics to return arbitrarily incorrect
0019 // values. In non-entry-point functions, attempting to call a function that needs
0020 // some preloaded register from a function that is known to not need it is a violation
0021 // of the calling convention and also program-level UB. Outside of such IR-level UB,
0022 // these preloaded registers are always set to a well-defined value and are thus `noundef`.
0023 class AMDGPUReadPreloadRegisterIntrinsic
0024   : DefaultAttrsIntrinsic<[llvm_i32_ty], [], [NoUndef<RetIndex>, IntrNoMem, IntrSpeculatable]>;
0025 
0026 class AMDGPUReadPreloadRegisterIntrinsicNamed<string name>
0027   : DefaultAttrsIntrinsic<[llvm_i32_ty], [], [NoUndef<RetIndex>, IntrNoMem, IntrSpeculatable]>, ClangBuiltin<name>;
0028 
0029 // Used to tag image and resource intrinsics with information used to generate
0030 // mem operands.
0031 class AMDGPURsrcIntrinsic<int rsrcarg, bit isimage = false> {
0032   int RsrcArg = rsrcarg;
0033   bit IsImage = isimage;
0034 }
0035 
0036 let TargetPrefix = "r600" in {
0037 
0038 multiclass AMDGPUReadPreloadRegisterIntrinsic_xyz {
0039   def _x : AMDGPUReadPreloadRegisterIntrinsic;
0040   def _y : AMDGPUReadPreloadRegisterIntrinsic;
0041   def _z : AMDGPUReadPreloadRegisterIntrinsic;
0042 }
0043 
0044 multiclass AMDGPUReadPreloadRegisterIntrinsic_xyz_named<string prefix> {
0045   def _x : AMDGPUReadPreloadRegisterIntrinsicNamed<!strconcat(prefix, "_x")>;
0046   def _y : AMDGPUReadPreloadRegisterIntrinsicNamed<!strconcat(prefix, "_y")>;
0047   def _z : AMDGPUReadPreloadRegisterIntrinsicNamed<!strconcat(prefix, "_z")>;
0048 }
0049 
0050 defm int_r600_read_global_size : AMDGPUReadPreloadRegisterIntrinsic_xyz_named
0051                                  <"__builtin_r600_read_global_size">;
0052 defm int_r600_read_ngroups : AMDGPUReadPreloadRegisterIntrinsic_xyz_named
0053                              <"__builtin_r600_read_ngroups">;
0054 defm int_r600_read_tgid : AMDGPUReadPreloadRegisterIntrinsic_xyz_named
0055                           <"__builtin_r600_read_tgid">;
0056 
0057 defm int_r600_read_local_size : AMDGPUReadPreloadRegisterIntrinsic_xyz;
0058 defm int_r600_read_tidig : AMDGPUReadPreloadRegisterIntrinsic_xyz;
0059 
0060 def int_r600_group_barrier : ClangBuiltin<"__builtin_r600_group_barrier">,
0061   Intrinsic<[], [], [IntrConvergent, IntrWillReturn]>;
0062 
0063 // AS 7 is PARAM_I_ADDRESS, used for kernel arguments
0064 def int_r600_implicitarg_ptr :
0065   ClangBuiltin<"__builtin_r600_implicitarg_ptr">,
0066   DefaultAttrsIntrinsic<[LLVMQualPointerType<7>], [],
0067   [NoUndef<RetIndex>, IntrNoMem, IntrSpeculatable]>;
0068 
0069 def int_r600_rat_store_typed :
0070   // 1st parameter: Data
0071   // 2nd parameter: Index
0072   // 3rd parameter: Constant RAT ID
0073   DefaultAttrsIntrinsic<[], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty], []>,
0074   ClangBuiltin<"__builtin_r600_rat_store_typed">;
0075 
0076 def int_r600_recipsqrt_ieee :  DefaultAttrsIntrinsic<
0077   [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
0078 >;
0079 
0080 def int_r600_recipsqrt_clamped : DefaultAttrsIntrinsic<
0081   [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
0082 >;
0083 
0084 def int_r600_cube : DefaultAttrsIntrinsic<
0085   [llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem, IntrSpeculatable]
0086 >;
0087 
0088 def int_r600_store_stream_output : DefaultAttrsIntrinsic<
0089   [], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []
0090 >;
0091 
0092 class TextureIntrinsicFloatInput : DefaultAttrsIntrinsic<[llvm_v4f32_ty], [
0093   llvm_v4f32_ty, // Coord
0094   llvm_i32_ty,   // offset_x
0095   llvm_i32_ty,   // offset_y,
0096   llvm_i32_ty,   // offset_z,
0097   llvm_i32_ty,   // resource_id
0098   llvm_i32_ty,   // samplerid
0099   llvm_i32_ty,   // coord_type_x
0100   llvm_i32_ty,   // coord_type_y
0101   llvm_i32_ty,   // coord_type_z
0102   llvm_i32_ty],  // coord_type_w
0103   [IntrNoMem]
0104 >;
0105 
0106 class TextureIntrinsicInt32Input : DefaultAttrsIntrinsic<[llvm_v4i32_ty], [
0107     llvm_v4i32_ty, // Coord
0108     llvm_i32_ty,   // offset_x
0109     llvm_i32_ty,   // offset_y,
0110     llvm_i32_ty,   // offset_z,
0111     llvm_i32_ty,   // resource_id
0112     llvm_i32_ty,   // samplerid
0113     llvm_i32_ty,   // coord_type_x
0114     llvm_i32_ty,   // coord_type_y
0115     llvm_i32_ty,   // coord_type_z
0116     llvm_i32_ty],  // coord_type_w
0117     [IntrNoMem]
0118 >;
0119 
0120 def int_r600_store_swizzle :
0121   Intrinsic<[], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty], [IntrWillReturn, IntrNoCallback, IntrNoFree]
0122 >;
0123 
0124 def int_r600_tex : TextureIntrinsicFloatInput;
0125 def int_r600_texc : TextureIntrinsicFloatInput;
0126 def int_r600_txl : TextureIntrinsicFloatInput;
0127 def int_r600_txlc : TextureIntrinsicFloatInput;
0128 def int_r600_txb : TextureIntrinsicFloatInput;
0129 def int_r600_txbc : TextureIntrinsicFloatInput;
0130 def int_r600_txf : TextureIntrinsicInt32Input;
0131 def int_r600_txq : TextureIntrinsicInt32Input;
0132 def int_r600_ddx : TextureIntrinsicFloatInput;
0133 def int_r600_ddy : TextureIntrinsicFloatInput;
0134 
0135 def int_r600_dot4 : DefaultAttrsIntrinsic<[llvm_float_ty],
0136   [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem, IntrSpeculatable]
0137 >;
0138 
0139 def int_r600_kill : DefaultAttrsIntrinsic<[], [llvm_float_ty], []>;
0140 
0141 } // End TargetPrefix = "r600"
0142 
0143 let TargetPrefix = "amdgcn" in {
0144 
0145 //===----------------------------------------------------------------------===//
0146 // ABI Special Intrinsics
0147 //===----------------------------------------------------------------------===//
0148 
0149 defm int_amdgcn_workitem_id : AMDGPUReadPreloadRegisterIntrinsic_xyz;
0150 defm int_amdgcn_workgroup_id : AMDGPUReadPreloadRegisterIntrinsic_xyz_named
0151                                <"__builtin_amdgcn_workgroup_id">;
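     // Illustrative IR usage (a minimal sketch; the value names are hypothetical):
     //   %tid.x  = call i32 @llvm.amdgcn.workitem.id.x()
     //   %wgid.y = call i32 @llvm.amdgcn.workgroup.id.y()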
0152 
0153 def int_amdgcn_dispatch_ptr :
0154   DefaultAttrsIntrinsic<[LLVMQualPointerType<4>], [],
0155   [Align<RetIndex, 4>, NoUndef<RetIndex>, NonNull<RetIndex>, IntrNoMem, IntrSpeculatable]>;
0156 
0157 def int_amdgcn_queue_ptr :
0158   ClangBuiltin<"__builtin_amdgcn_queue_ptr">,
0159   DefaultAttrsIntrinsic<[LLVMQualPointerType<4>], [],
0160   [Align<RetIndex, 4>, NoUndef<RetIndex>, NonNull<RetIndex>, IntrNoMem, IntrSpeculatable]>;
0161 
0162 def int_amdgcn_kernarg_segment_ptr :
0163   ClangBuiltin<"__builtin_amdgcn_kernarg_segment_ptr">,
0164   DefaultAttrsIntrinsic<[LLVMQualPointerType<4>], [],
0165   [Align<RetIndex, 4>, NoUndef<RetIndex>, IntrNoMem, IntrSpeculatable]>;
0166 
0167 def int_amdgcn_implicitarg_ptr :
0168   ClangBuiltin<"__builtin_amdgcn_implicitarg_ptr">,
0169   DefaultAttrsIntrinsic<[LLVMQualPointerType<4>], [],
0170   [Align<RetIndex, 4>, NoUndef<RetIndex>, IntrNoMem, IntrSpeculatable]>;
0171 
0172 // Returns the amount of LDS statically allocated for this program.
0173 // This is no longer guaranteed to be a compile-time constant due to linking
0174 // support.
0175 def int_amdgcn_groupstaticsize :
0176   ClangBuiltin<"__builtin_amdgcn_groupstaticsize">,
0177   DefaultAttrsIntrinsic<[llvm_i32_ty], [], [NoUndef<RetIndex>, IntrNoMem, IntrSpeculatable]>;
0178 
0179 def int_amdgcn_dispatch_id :
0180   ClangBuiltin<"__builtin_amdgcn_dispatch_id">,
0181   DefaultAttrsIntrinsic<[llvm_i64_ty], [], [NoUndef<RetIndex>, IntrNoMem, IntrSpeculatable]>;
0182 
0183 // For internal use. Coordinates LDS lowering between IR transform and backend.
0184 def int_amdgcn_lds_kernel_id :
0185   DefaultAttrsIntrinsic<[llvm_i32_ty], [], [NoUndef<RetIndex>, IntrNoMem, IntrSpeculatable]>;
0186 
0187 def int_amdgcn_implicit_buffer_ptr :
0188   ClangBuiltin<"__builtin_amdgcn_implicit_buffer_ptr">,
0189   DefaultAttrsIntrinsic<[LLVMQualPointerType<4>], [],
0190   [Align<RetIndex, 4>, NoUndef<RetIndex>,
0191     IntrNoMem, IntrSpeculatable]>;
0192 
0193 // Set EXEC to the 64-bit value given.
0194 // This is always moved to the beginning of the basic block.
0195 // FIXME: Should be mangled for wave size.
0196 def int_amdgcn_init_exec : Intrinsic<[],
0197   [llvm_i64_ty],      // 64-bit literal constant
0198   [IntrConvergent, IntrNoMem, IntrHasSideEffects, IntrNoCallback,
0199    IntrNoFree, IntrWillReturn, ImmArg<ArgIndex<0>>]>;
0200 
0201 // Set EXEC according to a thread count packed in an SGPR input:
0202 //    thread_count = (input >> bitoffset) & 0x7f;
0203 // This is always moved to the beginning of the basic block.
0204 // Note: only inreg arguments to the parent function are valid as
0205 // inputs to this intrinsic; computed values cannot be used.
0206 def int_amdgcn_init_exec_from_input : Intrinsic<[],
0207   [llvm_i32_ty,       // 32-bit SGPR input
0208    llvm_i32_ty],      // bit offset of the thread count
0209   [IntrConvergent, IntrHasSideEffects, IntrNoMem, IntrNoCallback,
0210    IntrNoFree, IntrWillReturn, ImmArg<ArgIndex<1>>]>;
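     // Illustrative IR usage (a sketch; the calling convention, value name and
     // bit offset below are assumptions, not taken from this file):
     //   define amdgpu_ps void @shader(i32 inreg %packed_count) {
     //     call void @llvm.amdgcn.init.exec.from.input(i32 %packed_count, i32 8)
     //     ...
     //   }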
0211 
0212 // Sets the function into whole-wave-mode and returns whether the lane was
0213 // active when entering the function. A branch depending on this return will
0214 // revert the EXEC mask to what it was when entering the function, thus
0215 // resulting in a no-op. This pattern is used to optimize branches when function
0216 // tails need to be run in whole-wave-mode. It may also have other consequences
0217 // (mostly related to WWM CSR handling) that differentiate it from using
0218 // a plain `amdgcn.init.exec -1`.
0219 def int_amdgcn_init_whole_wave : Intrinsic<[llvm_i1_ty], [], [
0220     IntrHasSideEffects, IntrNoMem, IntrConvergent]>;
0221 
0222 def int_amdgcn_wavefrontsize :
0223   ClangBuiltin<"__builtin_amdgcn_wavefrontsize">,
0224   DefaultAttrsIntrinsic<[llvm_i32_ty], [], [NoUndef<RetIndex>, IntrNoMem, IntrSpeculatable]>;
0225 
0226 // Represent a relocation constant.
0227 def int_amdgcn_reloc_constant : DefaultAttrsIntrinsic<
0228   [llvm_i32_ty], [llvm_metadata_ty],
0229   [IntrNoMem, IntrSpeculatable]
0230 >;
0231 
0232 //===----------------------------------------------------------------------===//
0233 // Instruction Intrinsics
0234 //===----------------------------------------------------------------------===//
0235 
0236 // The first parameter is the s_sendmsg immediate (i16);
0237 // the second one is copied to m0.
0238 def int_amdgcn_s_sendmsg : ClangBuiltin<"__builtin_amdgcn_s_sendmsg">,
0239   Intrinsic <[], [llvm_i32_ty, llvm_i32_ty],
0240   [ImmArg<ArgIndex<0>>, IntrNoMem, IntrHasSideEffects]>;
0241 def int_amdgcn_s_sendmsghalt : ClangBuiltin<"__builtin_amdgcn_s_sendmsghalt">,
0242   Intrinsic <[], [llvm_i32_ty, llvm_i32_ty],
0243   [ImmArg<ArgIndex<0>>, IntrNoMem, IntrHasSideEffects]>;
0244 
0245 
0246 // gfx11 intrinsic
0247 // The first parameter is the s_sendmsg immediate (i16). The return type is i32 or i64.
0248 def int_amdgcn_s_sendmsg_rtn : Intrinsic <[llvm_anyint_ty], [llvm_i32_ty],
0249   [ImmArg<ArgIndex<0>>, IntrNoMem, IntrHasSideEffects]>;
0250 
0251 // Vanilla workgroup sync-barrier
0252 def int_amdgcn_s_barrier : ClangBuiltin<"__builtin_amdgcn_s_barrier">,
0253   Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
0254 
0255 // Lower-level split-barrier intrinsics
0256 
0257 // void @llvm.amdgcn.s.barrier.signal(i32 %barrierType)
0258 // only for non-named barrier
0259 def int_amdgcn_s_barrier_signal : ClangBuiltin<"__builtin_amdgcn_s_barrier_signal">,
0260   Intrinsic<[], [llvm_i32_ty], [ImmArg<ArgIndex<0>>, IntrNoMem, IntrHasSideEffects, IntrConvergent, IntrWillReturn,
0261                                 IntrNoCallback, IntrNoFree]>;
0262 
0263 // void @llvm.amdgcn.s.barrier.signal.var(ptr addrspace(3) %barrier, i32 %memberCnt)
0264 // The %barrier and %memberCnt arguments must be uniform, otherwise behavior is undefined.
0265 def int_amdgcn_s_barrier_signal_var : ClangBuiltin<"__builtin_amdgcn_s_barrier_signal_var">,
0266   Intrinsic<[], [local_ptr_ty, llvm_i32_ty], [IntrNoMem, IntrHasSideEffects, IntrConvergent, IntrWillReturn,
0267                                 IntrNoCallback, IntrNoFree]>;
0268 
0269 // bool @llvm.amdgcn.s.barrier.signal.isfirst(i32 %barrierType)
0270 // only for non-named barrier
0271 def int_amdgcn_s_barrier_signal_isfirst : ClangBuiltin<"__builtin_amdgcn_s_barrier_signal_isfirst">,
0272   Intrinsic<[llvm_i1_ty], [llvm_i32_ty], [ImmArg<ArgIndex<0>>, IntrNoMem, IntrHasSideEffects, IntrConvergent,
0273                                 IntrWillReturn, IntrNoCallback, IntrNoFree]>;
0274 
0275 // void @llvm.amdgcn.s.barrier.init(ptr addrspace(3) %barrier, i32 %memberCnt)
0276 // The %barrier and %memberCnt arguments must be uniform, otherwise behavior is undefined.
0277 def int_amdgcn_s_barrier_init : ClangBuiltin<"__builtin_amdgcn_s_barrier_init">,
0278   Intrinsic<[], [local_ptr_ty, llvm_i32_ty], [IntrNoMem, IntrHasSideEffects, IntrConvergent,
0279                                 IntrWillReturn, IntrNoCallback, IntrNoFree]>;
0280 
0281 // void @llvm.amdgcn.s.barrier.join(ptr addrspace(3) %barrier)
0282 // The %barrier argument must be uniform, otherwise behavior is undefined.
0283 def int_amdgcn_s_barrier_join : ClangBuiltin<"__builtin_amdgcn_s_barrier_join">,
0284   Intrinsic<[], [local_ptr_ty], [IntrNoMem, IntrHasSideEffects, IntrConvergent, IntrWillReturn,
0285                                 IntrNoCallback, IntrNoFree]>;
0286 
0287 // void @llvm.amdgcn.s.barrier.wait(i16 %barrierType)
0288 def int_amdgcn_s_barrier_wait : ClangBuiltin<"__builtin_amdgcn_s_barrier_wait">,
0289   Intrinsic<[], [llvm_i16_ty], [ImmArg<ArgIndex<0>>, IntrNoMem, IntrHasSideEffects, IntrConvergent,
0290                                 IntrWillReturn, IntrNoCallback, IntrNoFree]>;
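     // Illustrative split-barrier sequence (a sketch; the barrier type constant
     // used here is an assumption standing in for the default workgroup barrier):
     //   call void @llvm.amdgcn.s.barrier.signal(i32 -1)
     //   ; ... work that does not depend on other waves having arrived ...
     //   call void @llvm.amdgcn.s.barrier.wait(i16 -1)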
0291 
0292 // void @llvm.amdgcn.s.barrier.leave(i16 %barrierType)
0293 def int_amdgcn_s_barrier_leave : ClangBuiltin<"__builtin_amdgcn_s_barrier_leave">,
0294   Intrinsic<[], [llvm_i16_ty], [ImmArg<ArgIndex<0>>, IntrNoMem, IntrHasSideEffects, IntrConvergent,
0295                                 IntrWillReturn, IntrNoCallback, IntrNoFree]>;
0296 
0297 // uint32_t @llvm.amdgcn.s.get.barrier.state(i32 %barrierId)
0298 // The %barrierId argument must be uniform, otherwise behavior is undefined.
0299 def int_amdgcn_s_get_barrier_state : ClangBuiltin<"__builtin_amdgcn_s_get_barrier_state">,
0300   Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem, IntrHasSideEffects, IntrConvergent, IntrWillReturn,
0301                                 IntrNoCallback, IntrNoFree]>;
0302 
0303 // uint32_t @llvm.amdgcn.s.get.named.barrier.state(ptr addrspace(3) %barrier)
0304 // The %barrier argument must be uniform, otherwise behavior is undefined.
0305 def int_amdgcn_s_get_named_barrier_state : ClangBuiltin<"__builtin_amdgcn_s_get_named_barrier_state">,
0306   Intrinsic<[llvm_i32_ty], [local_ptr_ty], [IntrNoMem, IntrHasSideEffects, IntrConvergent, IntrWillReturn,
0307                                 IntrNoCallback, IntrNoFree]>;
0308 
0309 def int_amdgcn_wave_barrier : ClangBuiltin<"__builtin_amdgcn_wave_barrier">,
0310   Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
0311 
0312 // The 1st parameter is a mask for the types of instructions that may be allowed
0313 // to cross the SCHED_BARRIER during scheduling.
0314 //     MASK = 0x0000 0000: No instructions may be scheduled across SCHED_BARRIER.
0315 //     MASK = 0x0000 0001: ALL, non-memory, non-side-effect producing instructions may be
0316 //                         scheduled across SCHED_BARRIER, i.e. allow ALU instructions to pass.
0317 //     MASK = 0x0000 0002: VALU instructions may be scheduled across SCHED_BARRIER.
0318 //     MASK = 0x0000 0004: SALU instructions may be scheduled across SCHED_BARRIER.
0319 //     MASK = 0x0000 0008: MFMA/WMMA instructions may be scheduled across SCHED_BARRIER.
0320 //     MASK = 0x0000 0010: ALL VMEM instructions may be scheduled across SCHED_BARRIER.
0321 //     MASK = 0x0000 0020: VMEM read instructions may be scheduled across SCHED_BARRIER.
0322 //     MASK = 0x0000 0040: VMEM write instructions may be scheduled across SCHED_BARRIER.
0323 //     MASK = 0x0000 0080: ALL DS instructions may be scheduled across SCHED_BARRIER.
0324 //     MASK = 0x0000 0100: ALL DS read instructions may be scheduled across SCHED_BARRIER.
0325 //     MASK = 0x0000 0200: ALL DS write instructions may be scheduled across SCHED_BARRIER.
0326 def int_amdgcn_sched_barrier : ClangBuiltin<"__builtin_amdgcn_sched_barrier">,
0327   Intrinsic<[], [llvm_i32_ty], [ImmArg<ArgIndex<0>>, IntrNoMem, IntrHasSideEffects, IntrConvergent,
0328                                 IntrWillReturn, IntrNoCallback, IntrNoFree]>;
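     // Illustrative IR usage (mask values as listed above):
     //   call void @llvm.amdgcn.sched.barrier(i32 0) ; no instructions may cross
     //   call void @llvm.amdgcn.sched.barrier(i32 6) ; only VALU and SALU may cross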
0329 
0330 // The first parameter is a mask that determines the types of instructions that
0331 // you would like to synchronize around and add to a scheduling group. The
0332 // values of the mask are defined above for sched_barrier. These instructions
0333 // will be selected from the bottom up starting from the sched_group_barrier's
0334 // location during instruction scheduling. The second parameter is the number of
0335 // matching instructions that will be associated with this sched_group_barrier.
0336 // The third parameter is an identifier which is used to describe what other
0337 // sched_group_barriers should be synchronized with.
0338 def int_amdgcn_sched_group_barrier : ClangBuiltin<"__builtin_amdgcn_sched_group_barrier">,
0339   Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
0340   [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>, IntrNoMem, IntrHasSideEffects,
0341    IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
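     // Illustrative IR usage (a sketch): alternate groups of 2 DS reads (mask
     // 0x100) with groups of 1 MFMA (mask 0x8), tying the groups together with
     // sync id 0:
     //   call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 2, i32 0)
     //   call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)
     //   call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 2, i32 0)
     //   call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)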
0342 
0343 // Scheduler optimization hint.
0344 //     MASK = 0: Small gemm opt
0345 def int_amdgcn_iglp_opt : ClangBuiltin<"__builtin_amdgcn_iglp_opt">,
0346   Intrinsic<[], [llvm_i32_ty], [ImmArg<ArgIndex<0>>, IntrNoMem, IntrHasSideEffects, IntrConvergent,
0347                                 IntrWillReturn, IntrNoCallback, IntrNoFree]>;
0348 
0349 def int_amdgcn_s_waitcnt : ClangBuiltin<"__builtin_amdgcn_s_waitcnt">,
0350   Intrinsic<[], [llvm_i32_ty], [ImmArg<ArgIndex<0>>, IntrNoMem, IntrHasSideEffects, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
0351 
0352 // GFX12 intrinsics
0353 class AMDGPUWaitIntrinsic :
0354   Intrinsic<[], [llvm_i16_ty], [ImmArg<ArgIndex<0>>, IntrNoMem, IntrHasSideEffects, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
0355 def int_amdgcn_s_wait_bvhcnt         : AMDGPUWaitIntrinsic;
0356 def int_amdgcn_s_wait_dscnt          : AMDGPUWaitIntrinsic;
0357 def int_amdgcn_s_wait_expcnt         : AMDGPUWaitIntrinsic;
0358 def int_amdgcn_s_wait_kmcnt          : AMDGPUWaitIntrinsic;
0359 def int_amdgcn_s_wait_loadcnt        : AMDGPUWaitIntrinsic;
0360 def int_amdgcn_s_wait_samplecnt      : AMDGPUWaitIntrinsic;
0361 def int_amdgcn_s_wait_storecnt       : AMDGPUWaitIntrinsic;
0362 
0363 def int_amdgcn_div_scale : DefaultAttrsIntrinsic<
0364   // 1st parameter: Numerator
0365   // 2nd parameter: Denominator
0366   // 3rd parameter: Selects which operand the scaled result corresponds to
0367   //                (0 = Denominator, 1 = Numerator).
0368   [llvm_anyfloat_ty, llvm_i1_ty],
0369   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i1_ty],
0370   [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<2>>]
0371 >;
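     // Illustrative IR usage (a sketch); the two results come back as a struct:
     //   %ds = call { float, i1 } @llvm.amdgcn.div.scale.f32(float %num, float %den, i1 true)
     //   %scaled = extractvalue { float, i1 } %ds, 0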
0372 
0373 def int_amdgcn_div_fmas : DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
0374   [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>, llvm_i1_ty],
0375   [IntrNoMem, IntrSpeculatable]
0376 >;
0377 
0378 def int_amdgcn_div_fixup : DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
0379   [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
0380   [IntrNoMem, IntrSpeculatable]
0381 >;
0382 
0383 // Looks up 2.0/pi for src0 with segment select src1[4:0].
0384 def int_amdgcn_trig_preop : DefaultAttrsIntrinsic<
0385   [llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty],
0386   [IntrNoMem, IntrSpeculatable]
0387 >;
0388 
0389 def int_amdgcn_sin : DefaultAttrsIntrinsic<
0390   [llvm_anyfloat_ty], [LLVMMatchType<0>],
0391   [IntrNoMem, IntrSpeculatable]
0392 >;
0393 
0394 def int_amdgcn_cos : DefaultAttrsIntrinsic<
0395   [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
0396 >;
0397 
0398 // v_log_{f16|f32}, performs log2. The f32 version does not handle
0399 // denormals. There is no reason to use this for f16, as that version
0400 // does support denormals, and the generic llvm.log2 intrinsic should
0401 // be preferred.
0402 def int_amdgcn_log : DefaultAttrsIntrinsic<
0403   [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
0404 >;
0405 
0406 // v_exp_{f16|f32} (int_amdgcn_exp was already taken by the export
0407 // intrinsic). Performs exp2. The f32 version does not handle
0408 // denormals. There is no reason to use this for f16, as that version
0409 // does support denormals, and the generic llvm.exp2 intrinsic should
0410 // be preferred.
0411 def int_amdgcn_exp2 : DefaultAttrsIntrinsic<
0412   [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
0413 >;
0414 
0415 def int_amdgcn_log_clamp : DefaultAttrsIntrinsic<
0416   [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
0417 >;
0418 
0419 def int_amdgcn_fmul_legacy : ClangBuiltin<"__builtin_amdgcn_fmul_legacy">,
0420   DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
0421   [IntrNoMem, IntrSpeculatable, Commutative]
0422 >;
0423 
0424 // Fused single-precision multiply-add with legacy behaviour for the multiply,
0425 // which is that +/- 0.0 * anything (even NaN or infinity) is +0.0. This is
0426 // intended for use on subtargets that have the v_fma_legacy_f32 and/or
0427 // v_fmac_legacy_f32 instructions. (Note that v_fma_legacy_f16 is unrelated and
0428 // has a completely different kind of legacy behaviour.)
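     // For example, fma.legacy(+0.0, NaN, 2.0) evaluates to 2.0, whereas the
     // regular llvm.fma intrinsic would propagate the NaN.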
0429 def int_amdgcn_fma_legacy :
0430   DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty],
0431   [IntrNoMem, IntrSpeculatable, Commutative]
0432 >;
0433 
0434 def int_amdgcn_rcp : DefaultAttrsIntrinsic<
0435   [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
0436 >;
0437 
0438 def int_amdgcn_rcp_legacy : ClangBuiltin<"__builtin_amdgcn_rcp_legacy">,
0439   DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty],
0440   [IntrNoMem, IntrSpeculatable]
0441 >;
0442 
0443 def int_amdgcn_sqrt :  DefaultAttrsIntrinsic<
0444   [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
0445 >;
0446 
0447 def int_amdgcn_rsq :  DefaultAttrsIntrinsic<
0448   [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
0449 >;
0450 
0451 def int_amdgcn_rsq_legacy :  ClangBuiltin<"__builtin_amdgcn_rsq_legacy">,
0452   DefaultAttrsIntrinsic<
0453   [llvm_float_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]
0454 >;
0455 
0456 // out = 1.0 / sqrt(a) result clamped to +/- max_float.
0457 def int_amdgcn_rsq_clamp : DefaultAttrsIntrinsic<
0458   [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]>;
0459 
0460 def int_amdgcn_frexp_mant : DefaultAttrsIntrinsic<
0461   [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
0462 >;
0463 
0464 def int_amdgcn_frexp_exp : DefaultAttrsIntrinsic<
0465   [llvm_anyint_ty], [llvm_anyfloat_ty], [IntrNoMem, IntrSpeculatable]
0466 >;
0467 
0468 // v_fract is buggy on SI/CI. It mishandles infinities, may return 1.0
0469 // and always uses rtz, so is not suitable for implementing the OpenCL
0470 // fract function. It should be ok on VI.
0471 def int_amdgcn_fract : DefaultAttrsIntrinsic<
0472   [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
0473 >;
0474 
0475 def int_amdgcn_cvt_pkrtz : ClangBuiltin<"__builtin_amdgcn_cvt_pkrtz">,
0476   DefaultAttrsIntrinsic<[llvm_v2f16_ty], [llvm_float_ty, llvm_float_ty],
0477             [IntrNoMem, IntrSpeculatable]
0478 >;
0479 
0480 def int_amdgcn_cvt_pknorm_i16 :
0481   ClangBuiltin<"__builtin_amdgcn_cvt_pknorm_i16">,
0482   DefaultAttrsIntrinsic<[llvm_v2i16_ty], [llvm_float_ty, llvm_float_ty],
0483             [IntrNoMem, IntrSpeculatable]
0484 >;
0485 
0486 def int_amdgcn_cvt_pknorm_u16 :
0487   ClangBuiltin<"__builtin_amdgcn_cvt_pknorm_u16">,
0488   DefaultAttrsIntrinsic<[llvm_v2i16_ty], [llvm_float_ty, llvm_float_ty],
0489             [IntrNoMem, IntrSpeculatable]
0490 >;
0491 
0492 def int_amdgcn_cvt_pk_i16 :
0493     ClangBuiltin<"__builtin_amdgcn_cvt_pk_i16">,
0494     DefaultAttrsIntrinsic<
0495   [llvm_v2i16_ty], [llvm_i32_ty, llvm_i32_ty],
0496   [IntrNoMem, IntrSpeculatable]
0497 >;
0498 
0499 def int_amdgcn_cvt_pk_u16 : ClangBuiltin<"__builtin_amdgcn_cvt_pk_u16">,
0500   DefaultAttrsIntrinsic<[llvm_v2i16_ty], [llvm_i32_ty, llvm_i32_ty],
0501     [IntrNoMem, IntrSpeculatable]
0502 >;
0503 
0504 def int_amdgcn_class : DefaultAttrsIntrinsic<
0505   [llvm_i1_ty], [llvm_anyfloat_ty, llvm_i32_ty],
0506   [IntrNoMem, IntrSpeculatable]
0507 >;
0508 
0509 def int_amdgcn_fmed3 :
0510   DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
0511     [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
0512     [IntrNoMem, IntrSpeculatable]
0513 >;
0514 
0515 def int_amdgcn_cubeid : ClangBuiltin<"__builtin_amdgcn_cubeid">,
0516   DefaultAttrsIntrinsic<[llvm_float_ty],
0517     [llvm_float_ty, llvm_float_ty, llvm_float_ty],
0518     [IntrNoMem, IntrSpeculatable]
0519 >;
0520 
0521 def int_amdgcn_cubema : ClangBuiltin<"__builtin_amdgcn_cubema">,
0522   DefaultAttrsIntrinsic<[llvm_float_ty],
0523   [llvm_float_ty, llvm_float_ty, llvm_float_ty],
0524   [IntrNoMem, IntrSpeculatable]
0525 >;
0526 
0527 def int_amdgcn_cubesc : ClangBuiltin<"__builtin_amdgcn_cubesc">,
0528   DefaultAttrsIntrinsic<[llvm_float_ty],
0529     [llvm_float_ty, llvm_float_ty, llvm_float_ty],
0530     [IntrNoMem, IntrSpeculatable]
0531 >;
0532 
0533 def int_amdgcn_cubetc : ClangBuiltin<"__builtin_amdgcn_cubetc">,
0534   DefaultAttrsIntrinsic<[llvm_float_ty],
0535     [llvm_float_ty, llvm_float_ty, llvm_float_ty],
0536     [IntrNoMem, IntrSpeculatable]
0537 >;
0538 
0539 // v_ffbh_i32, as opposed to v_ffbh_u32. For v_ffbh_u32, llvm.ctlz
0540 // should be used.
0541 def int_amdgcn_sffbh :
0542   DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMMatchType<0>],
0543   [IntrNoMem, IntrSpeculatable]
0544 >;
0545 
0546 // v_mad_f32|f16/v_mac_f32|f16, selected regardless of denorm support.
0547 def int_amdgcn_fmad_ftz :
0548   DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
0549             [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
0550             [IntrNoMem, IntrSpeculatable]
0551 >;
0552 
0553 // FIXME: The m0 argument should be moved after the normal arguments
0554 class AMDGPUDSOrderedIntrinsic : Intrinsic<
0555   [llvm_i32_ty],
0556   // M0 = {hi16:address, lo16:waveID}. Allow passing M0 as a pointer, so that
0557   // the bit packing can be optimized at the IR level.
0558   [LLVMQualPointerType<2>, // IntToPtr(M0)
0559    llvm_i32_ty, // value to add or swap
0560    llvm_i32_ty, // ordering
0561    llvm_i32_ty, // scope
0562    llvm_i1_ty,  // isVolatile
0563    llvm_i32_ty, // ordered count index (OA index), also added to the address
0564                 // gfx10: bits 24-27 indicate the number of active threads/dwords
0565    llvm_i1_ty,  // wave release, usually set to 1
0566    llvm_i1_ty], // wave done, set to 1 for the last ordered instruction
0567   [IntrWillReturn, NoCapture<ArgIndex<0>>,
0568    ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>,
0569    ImmArg<ArgIndex<5>>, ImmArg<ArgIndex<6>>, ImmArg<ArgIndex<7>>, IntrNoCallback, IntrNoFree
0570   ]
0571 >;
0572 
0573 class AMDGPUDSAppendConsumedIntrinsic : Intrinsic<
0574   [llvm_i32_ty],
0575   [llvm_anyptr_ty, // LDS or GDS ptr
0576    llvm_i1_ty], // isVolatile
0577    [IntrConvergent, IntrWillReturn, IntrArgMemOnly,
0578     Align<ArgIndex<0>, 4>, NoCapture<ArgIndex<0>>,
0579     ImmArg<ArgIndex<1>>, IntrNoCallback, IntrNoFree],
0580    "",
0581    [SDNPMemOperand]
0582 >;
0583 
0584 def int_amdgcn_ds_ordered_add : AMDGPUDSOrderedIntrinsic;
0585 def int_amdgcn_ds_ordered_swap : AMDGPUDSOrderedIntrinsic;
0586 
0587 // The pointer argument is assumed to be dynamically uniform if a VGPR.
0588 def int_amdgcn_ds_append : AMDGPUDSAppendConsumedIntrinsic;
0589 def int_amdgcn_ds_consume : AMDGPUDSAppendConsumedIntrinsic;
0590 
0591 class AMDGPUCvtScaleF32Intrinsic<LLVMType DstTy, LLVMType Src0Ty, string name> : DefaultAttrsIntrinsic<
0592   [DstTy], [Src0Ty, llvm_float_ty], [IntrNoMem, IntrSpeculatable]
0593 >, ClangBuiltin<"__builtin_amdgcn_"#name>;
0594 
0595 class AMDGPUCvtScaleF32ToFP6BF6Intrinsic<LLVMType DstTy, LLVMType Src0Ty, LLVMType Src1Ty, string name> : DefaultAttrsIntrinsic<
0596   [DstTy], [Src0Ty, Src1Ty, llvm_float_ty], [IntrNoMem, IntrSpeculatable]
0597 >, ClangBuiltin<"__builtin_amdgcn_"#name>;
0598 
0599 class AMDGPUCvtScaleF32SRIntrinsic<LLVMType DstTy, LLVMType Src0Ty, string name> : DefaultAttrsIntrinsic<
0600   [DstTy], [Src0Ty, llvm_i32_ty, llvm_float_ty], [IntrNoMem, IntrSpeculatable]
0601 >, ClangBuiltin<"__builtin_amdgcn_"#name>;
0602 
0603 def int_amdgcn_cvt_scalef32_pk32_fp6_f16  : AMDGPUCvtScaleF32Intrinsic<llvm_v6i32_ty, llvm_v32f16_ty,  "cvt_scalef32_pk32_fp6_f16">;
0604 def int_amdgcn_cvt_scalef32_pk32_bf6_f16  : AMDGPUCvtScaleF32Intrinsic<llvm_v6i32_ty, llvm_v32f16_ty,  "cvt_scalef32_pk32_bf6_f16">;
0605 def int_amdgcn_cvt_scalef32_pk32_fp6_bf16 : AMDGPUCvtScaleF32Intrinsic<llvm_v6i32_ty, llvm_v32bf16_ty, "cvt_scalef32_pk32_fp6_bf16">;
0606 def int_amdgcn_cvt_scalef32_pk32_bf6_bf16 : AMDGPUCvtScaleF32Intrinsic<llvm_v6i32_ty, llvm_v32bf16_ty, "cvt_scalef32_pk32_bf6_bf16">;
0607 def int_amdgcn_cvt_scalef32_2xpk16_fp6_f32 : AMDGPUCvtScaleF32ToFP6BF6Intrinsic<llvm_v6i32_ty, llvm_v16f32_ty, llvm_v16f32_ty, "cvt_scalef32_2xpk16_fp6_f32">;
0608 def int_amdgcn_cvt_scalef32_2xpk16_bf6_f32 : AMDGPUCvtScaleF32ToFP6BF6Intrinsic<llvm_v6i32_ty, llvm_v16f32_ty, llvm_v16f32_ty, "cvt_scalef32_2xpk16_bf6_f32">;
0609 
0610 def int_amdgcn_cvt_scalef32_sr_pk32_bf6_bf16 : AMDGPUCvtScaleF32SRIntrinsic<llvm_v6i32_ty, llvm_v32bf16_ty, "cvt_scalef32_sr_pk32_bf6_bf16">;
0611 def int_amdgcn_cvt_scalef32_sr_pk32_bf6_f16  : AMDGPUCvtScaleF32SRIntrinsic<llvm_v6i32_ty, llvm_v32f16_ty, "cvt_scalef32_sr_pk32_bf6_f16">;
0612 def int_amdgcn_cvt_scalef32_sr_pk32_bf6_f32  : AMDGPUCvtScaleF32SRIntrinsic<llvm_v6i32_ty, llvm_v32f32_ty, "cvt_scalef32_sr_pk32_bf6_f32">;
0613 def int_amdgcn_cvt_scalef32_sr_pk32_fp6_bf16 : AMDGPUCvtScaleF32SRIntrinsic<llvm_v6i32_ty, llvm_v32bf16_ty, "cvt_scalef32_sr_pk32_fp6_bf16">;
0614 def int_amdgcn_cvt_scalef32_sr_pk32_fp6_f16  : AMDGPUCvtScaleF32SRIntrinsic<llvm_v6i32_ty, llvm_v32f16_ty, "cvt_scalef32_sr_pk32_fp6_f16">;
0615 def int_amdgcn_cvt_scalef32_sr_pk32_fp6_f32  : AMDGPUCvtScaleF32SRIntrinsic<llvm_v6i32_ty, llvm_v32f32_ty, "cvt_scalef32_sr_pk32_fp6_f32">;
0616 
0617 class AMDGPUCvtScaleFP4FP8BF8ToF1632Intrinsic<LLVMType DstTy, string name> : DefaultAttrsIntrinsic<
0618   [DstTy],
0619   [llvm_i32_ty,   // src
0620    llvm_float_ty, // scale
0621    llvm_i32_ty],  // src_sel index [0..3]
0622   [IntrNoMem, IntrWillReturn, ImmArg<ArgIndex<2>>]
0623 >, ClangBuiltin<"__builtin_amdgcn_"#name>;
0624 
0625 class AMDGPUCvtScale_pk_FromFP8BF8Intrinsic<LLVMType DstTy, string name> : DefaultAttrsIntrinsic<
0626   [DstTy],
0627   [llvm_i32_ty,   // src
0628    llvm_float_ty, // scale
0629    llvm_i1_ty],   // src_lo_hi_sel[true false]
0630   [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<2>>]
0631 >, ClangBuiltin<"__builtin_amdgcn_"#name>;
0632 
0633 class AMDGPUCvtScaleF16BF16ToFP8BF8TiedInputIntrinsic<LLVMType SrcTy, string name> : DefaultAttrsIntrinsic<
0634   [llvm_v2i16_ty],
0635   [llvm_v2i16_ty, // old_vdst
0636    SrcTy,         // src
0637    llvm_float_ty, // scale
0638    llvm_i1_ty],   // dst_lo_hi_sel[true false]
0639   [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<3>>]
0640 >, ClangBuiltin<"__builtin_amdgcn_"#name>;
0641 
0642 class AMDGPUCvtScaleF32ToFP8BF8TiedInputIntrinsic<string name> : DefaultAttrsIntrinsic<
0643   [llvm_v2i16_ty],
0644   [llvm_v2i16_ty, // old_vdst
0645    llvm_float_ty, // src0
0646    llvm_float_ty, // src1
0647    llvm_float_ty, // scale
0648    llvm_i1_ty],   // dst_lo_hi_sel[true false]
0649   [IntrNoMem, IntrWillReturn, ImmArg<ArgIndex<4>>]
0650 >, ClangBuiltin<"__builtin_amdgcn_"#name>;
0651 
0652 class AMDGPUCvtScaleFP8BF8ToF16TiedInputIntrinsic<LLVMType DstTy, string name> : DefaultAttrsIntrinsic<
0653   [DstTy],
0654   [llvm_v2f16_ty, // old_vdst
0655    llvm_i32_ty,   // src
0656    llvm_float_ty, // scale
0657    llvm_i32_ty,   // src_sel_index[0..3]
0658    llvm_i1_ty],   // dst_lo_hi_sel[true false]
0659   [IntrNoMem, IntrWillReturn, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]
0660 >, ClangBuiltin<"__builtin_amdgcn_"#name>;
0661 
0662 class AMDGPUCvtScaleF32ToFP4Intrinsic<string name> : DefaultAttrsIntrinsic<
0663   [llvm_i32_ty],
0664   [llvm_i32_ty,   // old_vdst
0665    llvm_float_ty, // src0
0666    llvm_float_ty, // src1
0667    llvm_float_ty, // scale
0668    llvm_i32_ty],  // dst_sel_index[0..3]
0669   [IntrNoMem, IntrWillReturn, ImmArg<ArgIndex<4>>]
0670 >, ClangBuiltin<"__builtin_amdgcn_"#name>;
0671 
0672 class AMDGPUCvtScaleF16ToFP4TiedInputIntrinsic<LLVMType SrcTy, string name> : DefaultAttrsIntrinsic<
0673   [llvm_i32_ty],
0674   [llvm_i32_ty,   // old_vdst
0675    SrcTy,         // src
0676    llvm_float_ty, // scale
0677    llvm_i32_ty],  // dest_sel_index [0..3]
0678   [IntrNoMem, IntrWillReturn, ImmArg<ArgIndex<3>>]
0679 >, ClangBuiltin<"__builtin_amdgcn_"#name>;
0680 
0681 class AMDGPUCvtScaleBF16F16F32SRToFP4BF8F8TiedInputIntrinsic<LLVMType Src0Ty, string name> : DefaultAttrsIntrinsic<
0682   [llvm_i32_ty],
0683   [llvm_i32_ty,   // old_vdst
0684    Src0Ty,        // src0
0685    llvm_i32_ty,   // seed
0686    llvm_float_ty, // scale
0687    llvm_i32_ty],  // dst_sel_index[0..3]
0688   [IntrNoMem, IntrWillReturn, ImmArg<ArgIndex<4>>]
0689 >, ClangBuiltin<"__builtin_amdgcn_"#name>;
0690 
0691 class AMDGPUCvtScaleSRF32ToBF16F16TiedInputIntrinsic<LLVMType DstTy, string name> : DefaultAttrsIntrinsic<
0692   [DstTy],
0693   [DstTy,         // old_vdst
0694    llvm_float_ty, // src0
0695    llvm_i32_ty,   // seed
0696    llvm_i1_ty],   // dst_lo_hi_sel[true false]
0697   [IntrNoMem, IntrWillReturn, ImmArg<ArgIndex<3>>]
0698 >, ClangBuiltin<"__builtin_amdgcn_"#name>;
0699 
0700 def int_amdgcn_cvt_sr_bf16_f32: AMDGPUCvtScaleSRF32ToBF16F16TiedInputIntrinsic<llvm_v2bf16_ty, "cvt_sr_bf16_f32">;
0701 def int_amdgcn_cvt_sr_f16_f32 : AMDGPUCvtScaleSRF32ToBF16F16TiedInputIntrinsic<llvm_v2f16_ty, "cvt_sr_f16_f32">;
0702 
0703 // llvm.amdgcn.cvt.scalef32.f16.fp8 v2f16 old_vdst, int src, float scale, int src_sel_index [0..3], bool dst_lo_hi_sel
0704 def int_amdgcn_cvt_scalef32_f16_fp8  : AMDGPUCvtScaleFP8BF8ToF16TiedInputIntrinsic<llvm_v2f16_ty, "cvt_scalef32_f16_fp8">;
0705 def int_amdgcn_cvt_scalef32_f16_bf8  : AMDGPUCvtScaleFP8BF8ToF16TiedInputIntrinsic<llvm_v2f16_ty, "cvt_scalef32_f16_bf8">;
0706 
0707 // llvm.amdgcn.cvt.scalef32.f32.fp8 int src, float scale, int src_sel_index [0..3]
0708 def int_amdgcn_cvt_scalef32_f32_fp8  : AMDGPUCvtScaleFP4FP8BF8ToF1632Intrinsic<llvm_float_ty, "cvt_scalef32_f32_fp8">;
0709 def int_amdgcn_cvt_scalef32_f32_bf8  : AMDGPUCvtScaleFP4FP8BF8ToF1632Intrinsic<llvm_float_ty, "cvt_scalef32_f32_bf8">;
0710 
0711 // llvm.amdgcn.cvt.scalef32.pk.fp8.f32 v2i16 old_vdst, float srcA, float srcB, float scale, bool dst_lo_hi_sel
0712 def int_amdgcn_cvt_scalef32_pk_fp8_f32 : AMDGPUCvtScaleF32ToFP8BF8TiedInputIntrinsic<"cvt_scalef32_pk_fp8_f32">;
0713 def int_amdgcn_cvt_scalef32_pk_bf8_f32 : AMDGPUCvtScaleF32ToFP8BF8TiedInputIntrinsic<"cvt_scalef32_pk_bf8_f32">;
0714 
0715 // llvm.amdgcn.cvt.scalef32.pk.f32.fp8 int src, float scale, bool src_lo_hi_sel
0716 def int_amdgcn_cvt_scalef32_pk_f32_fp8 : AMDGPUCvtScale_pk_FromFP8BF8Intrinsic<llvm_v2f32_ty, "cvt_scalef32_pk_f32_fp8">;
0717 def int_amdgcn_cvt_scalef32_pk_f32_bf8 : AMDGPUCvtScale_pk_FromFP8BF8Intrinsic<llvm_v2f32_ty, "cvt_scalef32_pk_f32_bf8">;
0718 
0719 // llvm.amdgcn.cvt.scalef32.pk.fp8.f16 v2i16 old_vdst, v2f16 src, float scale, bool dst_lo_hi_sel
0720 def int_amdgcn_cvt_scalef32_pk_fp8_f16 : AMDGPUCvtScaleF16BF16ToFP8BF8TiedInputIntrinsic<llvm_v2f16_ty, "cvt_scalef32_pk_fp8_f16">;
0721 def int_amdgcn_cvt_scalef32_pk_fp8_bf16: AMDGPUCvtScaleF16BF16ToFP8BF8TiedInputIntrinsic<llvm_v2bf16_ty, "cvt_scalef32_pk_fp8_bf16">;
0722 def int_amdgcn_cvt_scalef32_pk_bf8_f16 : AMDGPUCvtScaleF16BF16ToFP8BF8TiedInputIntrinsic<llvm_v2f16_ty, "cvt_scalef32_pk_bf8_f16">;
0723 def int_amdgcn_cvt_scalef32_pk_bf8_bf16: AMDGPUCvtScaleF16BF16ToFP8BF8TiedInputIntrinsic<llvm_v2bf16_ty, "cvt_scalef32_pk_bf8_bf16">;
0724 
0725 // llvm.amdgcn.cvt.scalef32.pk.f32.fp4 int src, float scale, int src_sel_index [0..3]
0726 def int_amdgcn_cvt_scalef32_pk_f32_fp4 : AMDGPUCvtScaleFP4FP8BF8ToF1632Intrinsic<llvm_v2f32_ty, "cvt_scalef32_pk_f32_fp4">;
0727 
0728 // llvm.amdgcn.cvt.scalef32.pk.fp4.f32 i32 old_vdst, float srcA, float srcB, float scale, int dst_sel_index[0..3]
0729 def int_amdgcn_cvt_scalef32_pk_fp4_f32 : AMDGPUCvtScaleF32ToFP4Intrinsic<"cvt_scalef32_pk_fp4_f32">;
0730 
0731 // llvm.amdgcn.cvt.scalef32.pk.f16.fp4 int src, float scale, int src_sel_index [0..3]
0732 def int_amdgcn_cvt_scalef32_pk_f16_fp4 : AMDGPUCvtScaleFP4FP8BF8ToF1632Intrinsic<llvm_v2f16_ty, "cvt_scalef32_pk_f16_fp4">;
0733 def int_amdgcn_cvt_scalef32_pk_bf16_fp4: AMDGPUCvtScaleFP4FP8BF8ToF1632Intrinsic<llvm_v2bf16_ty, "cvt_scalef32_pk_bf16_fp4">;
0734 
0735 // llvm.amdgcn.cvt.scalef32.pk32.f32.fp6 v6i32 src, float scale
0736 def int_amdgcn_cvt_scalef32_pk32_f32_fp6  : AMDGPUCvtScaleF32Intrinsic<llvm_v32f32_ty, llvm_v6i32_ty, "cvt_scalef32_pk32_f32_fp6">;
0737 def int_amdgcn_cvt_scalef32_pk32_f32_bf6  : AMDGPUCvtScaleF32Intrinsic<llvm_v32f32_ty, llvm_v6i32_ty, "cvt_scalef32_pk32_f32_bf6">;
0738 
0739 // llvm.amdgcn.cvt.scalef32.pk32.f16.fp6 v6i32 src, float scale
0740 def int_amdgcn_cvt_scalef32_pk32_f16_bf6  : AMDGPUCvtScaleF32Intrinsic<llvm_v32f16_ty,  llvm_v6i32_ty, "cvt_scalef32_pk32_f16_bf6">;
0741 def int_amdgcn_cvt_scalef32_pk32_bf16_bf6 : AMDGPUCvtScaleF32Intrinsic<llvm_v32bf16_ty, llvm_v6i32_ty, "cvt_scalef32_pk32_bf16_bf6">;
0742 def int_amdgcn_cvt_scalef32_pk32_f16_fp6  : AMDGPUCvtScaleF32Intrinsic<llvm_v32f16_ty,  llvm_v6i32_ty, "cvt_scalef32_pk32_f16_fp6">;
0743 def int_amdgcn_cvt_scalef32_pk32_bf16_fp6 : AMDGPUCvtScaleF32Intrinsic<llvm_v32bf16_ty, llvm_v6i32_ty, "cvt_scalef32_pk32_bf16_fp6">;
0744 
0745 // llvm.amdgcn.cvt.scalef32.pk.f16.fp8 int src, float scale, bool src_lo_hi_sel
0746 def int_amdgcn_cvt_scalef32_pk_f16_bf8    : AMDGPUCvtScale_pk_FromFP8BF8Intrinsic<llvm_v2f16_ty, "cvt_scalef32_pk_f16_bf8">;
0747 def int_amdgcn_cvt_scalef32_pk_bf16_bf8   : AMDGPUCvtScale_pk_FromFP8BF8Intrinsic<llvm_v2bf16_ty, "cvt_scalef32_pk_bf16_bf8">;
0748 def int_amdgcn_cvt_scalef32_pk_f16_fp8    : AMDGPUCvtScale_pk_FromFP8BF8Intrinsic<llvm_v2f16_ty, "cvt_scalef32_pk_f16_fp8">;
0749 def int_amdgcn_cvt_scalef32_pk_bf16_fp8   : AMDGPUCvtScale_pk_FromFP8BF8Intrinsic<llvm_v2bf16_ty, "cvt_scalef32_pk_bf16_fp8">;
0750 
0751 // llvm.amdgcn.cvt.scalef32.pk.fp4.f16 i32 old_vdst, v2f16 src, float scale, int dst_sel_index [0..3]
0752 def int_amdgcn_cvt_scalef32_pk_fp4_f16 : AMDGPUCvtScaleF16ToFP4TiedInputIntrinsic<llvm_v2f16_ty, "cvt_scalef32_pk_fp4_f16">;
0753 def int_amdgcn_cvt_scalef32_pk_fp4_bf16: AMDGPUCvtScaleF16ToFP4TiedInputIntrinsic<llvm_v2bf16_ty, "cvt_scalef32_pk_fp4_bf16">;
0754 
0755 def int_amdgcn_cvt_scalef32_sr_pk_fp4_f16: AMDGPUCvtScaleBF16F16F32SRToFP4BF8F8TiedInputIntrinsic<llvm_v2f16_ty, "cvt_scalef32_sr_pk_fp4_f16">;
0756 def int_amdgcn_cvt_scalef32_sr_pk_fp4_bf16: AMDGPUCvtScaleBF16F16F32SRToFP4BF8F8TiedInputIntrinsic<llvm_v2bf16_ty, "cvt_scalef32_sr_pk_fp4_bf16">;
0757 def int_amdgcn_cvt_scalef32_sr_pk_fp4_f32: AMDGPUCvtScaleBF16F16F32SRToFP4BF8F8TiedInputIntrinsic<llvm_v2f32_ty, "cvt_scalef32_sr_pk_fp4_f32">;
0758 def int_amdgcn_cvt_scalef32_sr_bf8_bf16: AMDGPUCvtScaleBF16F16F32SRToFP4BF8F8TiedInputIntrinsic<llvm_bfloat_ty, "cvt_scalef32_sr_bf8_bf16">;
0759 def int_amdgcn_cvt_scalef32_sr_bf8_f16: AMDGPUCvtScaleBF16F16F32SRToFP4BF8F8TiedInputIntrinsic<llvm_half_ty, "cvt_scalef32_sr_bf8_f16">;
0760 def int_amdgcn_cvt_scalef32_sr_bf8_f32: AMDGPUCvtScaleBF16F16F32SRToFP4BF8F8TiedInputIntrinsic<llvm_float_ty, "cvt_scalef32_sr_bf8_f32">;
0761 def int_amdgcn_cvt_scalef32_sr_fp8_bf16: AMDGPUCvtScaleBF16F16F32SRToFP4BF8F8TiedInputIntrinsic<llvm_bfloat_ty, "cvt_scalef32_sr_fp8_bf16">;
0762 def int_amdgcn_cvt_scalef32_sr_fp8_f16: AMDGPUCvtScaleBF16F16F32SRToFP4BF8F8TiedInputIntrinsic<llvm_half_ty, "cvt_scalef32_sr_fp8_f16">;
0763 def int_amdgcn_cvt_scalef32_sr_fp8_f32: AMDGPUCvtScaleBF16F16F32SRToFP4BF8F8TiedInputIntrinsic<llvm_float_ty, "cvt_scalef32_sr_fp8_f32">;
0764 
0765 def int_amdgcn_prng_b32 : DefaultAttrsIntrinsic<
0766   [llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]
0767 >, ClangBuiltin<"__builtin_amdgcn_prng_b32">;
0768 
0769 def int_amdgcn_bitop3 :
0770   DefaultAttrsIntrinsic<[llvm_anyint_ty],
0771                         [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
0772                         [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<3>>]>;
0773 
0774 } // TargetPrefix = "amdgcn"
0775 
0776 // New-style image intrinsics
0777 
0778 //////////////////////////////////////////////////////////////////////////
0779 // Dimension-aware image intrinsics framework
0780 //////////////////////////////////////////////////////////////////////////
0781 
0782 // Helper class to represent (type, name) combinations of arguments. The
0783 // argument names are explanatory and used as DAG operand names for codegen
0784 // pattern matching.
0785 class AMDGPUArg<LLVMType ty, string name> {
0786   LLVMType Type = ty;
0787   string Name = name;
0788 }
0789 
0790 // Return [AMDGPUArg<basety, names[0]>, AMDGPUArg<LLVMMatchType<0>, names[1]>, ...]
0791 class makeArgList<list<string> names, LLVMType basety> {
0792   list<AMDGPUArg> ret =
0793     !listconcat([AMDGPUArg<basety, names[0]>],
0794                 !foreach(name, !tail(names), AMDGPUArg<LLVMMatchType<0>, name>));
0795 }
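     // For example, makeArgList<["s", "t"], llvm_anyfloat_ty>.ret is
     //   [AMDGPUArg<llvm_anyfloat_ty, "s">, AMDGPUArg<LLVMMatchType<0>, "t">].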
0796 
0797 // Return arglist, with LLVMMatchType's references shifted by 'shift'.
0798 class arglistmatchshift<list<AMDGPUArg> arglist, int shift> {
0799   list<AMDGPUArg> ret =
0800     !foreach(arg, arglist,
0801              !if(!isa<LLVMMatchType>(arg.Type),
0802                  AMDGPUArg<LLVMMatchType<!add(!cast<LLVMMatchType>(arg.Type).Number, shift)>,
0803                            arg.Name>,
0804                  arg));
0805 }
0806 
0807 // Return the concatenation of the given arglists. LLVMMatchType's are adjusted
0808 // accordingly, and shifted by an additional 'shift'.
0809 class arglistconcat<list<list<AMDGPUArg>> arglists, int shift = 0> {
0810   list<AMDGPUArg> ret =
0811     !foldl([]<AMDGPUArg>, arglists, lhs, rhs,
0812            !listconcat(
0813              lhs,
0814              arglistmatchshift<rhs,
0815                                !add(shift, !foldl(0, lhs, a, b,
0816                                                   !add(a, b.Type.isAny)))>.ret));
0817 }
0818 
0819 // Represent texture/image types / dimensionality.
0820 class AMDGPUDimProps<bits<3> enc, string name, string asmsuffix,
0821                      list<string> coord_names, list<string> slice_names,
0822                      bit msaa = 0> {
0823   AMDGPUDimProps Dim = !cast<AMDGPUDimProps>(NAME);
0824   string Name = name; // e.g. "2darraymsaa"
0825   string AsmSuffix = asmsuffix; // e.g. 2D_MSAA_ARRAY (used in assembly strings)
0826   bits<3> Encoding = enc;
0827   bit DA = 0; // DA bit in MIMG encoding
0828   bit MSAA = msaa;
0829 
0830   list<AMDGPUArg> CoordSliceArgs =
0831     makeArgList<!listconcat(coord_names, slice_names), llvm_anyfloat_ty>.ret;
0832   list<AMDGPUArg> CoordSliceIntArgs =
0833     makeArgList<!listconcat(coord_names, slice_names), llvm_anyint_ty>.ret;
0834   list<AMDGPUArg> GradientArgs =
0835     makeArgList<!listconcat(!foreach(name, coord_names, "d" # name # "dh"),
0836                             !foreach(name, coord_names, "d" # name # "dv")),
0837                 llvm_anyfloat_ty>.ret;
0838 
0839   bits<8> NumCoords = !size(CoordSliceArgs);
0840   bits<8> NumGradients = !size(GradientArgs);
0841 }
0842 
0843 def AMDGPUDim1D : AMDGPUDimProps<0x0, "1d", "1D", ["s"], []>;
0844 def AMDGPUDim2D : AMDGPUDimProps<0x1, "2d", "2D", ["s", "t"], []>;
0845 def AMDGPUDim3D : AMDGPUDimProps<0x2, "3d", "3D", ["s", "t", "r"], []>;
0846 let DA = 1 in {
0847   def AMDGPUDimCube : AMDGPUDimProps<0x3, "cube", "CUBE", ["s", "t"], ["face"]>;
0848   def AMDGPUDim1DArray : AMDGPUDimProps<0x4, "1darray", "1D_ARRAY", ["s"], ["slice"]>;
0849   def AMDGPUDim2DArray : AMDGPUDimProps<0x5, "2darray", "2D_ARRAY", ["s", "t"], ["slice"]>;
0850 }
0851 def AMDGPUDim2DMsaa : AMDGPUDimProps<0x6, "2dmsaa", "2D_MSAA", ["s", "t"], ["fragid"], 1>;
0852 let DA = 1 in {
0853   def AMDGPUDim2DArrayMsaa : AMDGPUDimProps<0x7, "2darraymsaa", "2D_MSAA_ARRAY", ["s", "t"], ["slice", "fragid"], 1>;
0854 }
0855 
0856 def AMDGPUDims {
0857   list<AMDGPUDimProps> NoMsaa = [AMDGPUDim1D, AMDGPUDim2D, AMDGPUDim3D,
0858                                  AMDGPUDimCube, AMDGPUDim1DArray,
0859                                  AMDGPUDim2DArray];
0860   list<AMDGPUDimProps> Msaa = [AMDGPUDim2DMsaa, AMDGPUDim2DArrayMsaa];
0861   list<AMDGPUDimProps> All = !listconcat(NoMsaa, Msaa);
0862 }
0863 
0864 // Represent sample variants, i.e. _C, _O, _B, ... and combinations thereof.
0865 class AMDGPUSampleVariant<string ucmod, string lcmod, list<AMDGPUArg> extra_addr> {
0866   string UpperCaseMod = ucmod;
0867   string LowerCaseMod = lcmod;
0868 
0869   // {offset} {bias} {z-compare}
0870   list<AMDGPUArg> ExtraAddrArgs = extra_addr;
0871   bit Offset = false;
0872   bit Bias = false;
0873   bit ZCompare = false;
0874   bit Gradients = false;
0875 
0876   // Name of the {lod} or {clamp} argument that is appended to the coordinates,
0877   // if any.
0878   string LodOrClamp = "";
0879 
0880   bit UsesWQM = false;
0881 }
0882 
0883 // AMDGPUSampleVariants: all variants supported by IMAGE_SAMPLE
0884 // AMDGPUSampleVariantsNoGradients: variants supported by IMAGE_GATHER4
0885 defset list<AMDGPUSampleVariant> AMDGPUSampleVariants = {
0886   multiclass AMDGPUSampleHelper_Offset<string ucmod, string lcmod,
0887                                        list<AMDGPUArg> extra_addr> {
0888     def NAME#lcmod : AMDGPUSampleVariant<ucmod, lcmod, extra_addr>;
0889     let Offset = true in
0890     def NAME#lcmod#_o : AMDGPUSampleVariant<
0891         ucmod#"_O", lcmod#"_o", !listconcat([AMDGPUArg<llvm_i32_ty, "offset">], extra_addr)>;
0892   }
0893 
0894   multiclass AMDGPUSampleHelper_Compare<string ucmod, string lcmod,
0895                                         list<AMDGPUArg> extra_addr> {
0896     defm NAME : AMDGPUSampleHelper_Offset<ucmod, lcmod, extra_addr>;
0897     let ZCompare = true in
0898     defm NAME : AMDGPUSampleHelper_Offset<
0899         "_C"#ucmod, "_c"#lcmod, !listconcat(extra_addr, [AMDGPUArg<llvm_float_ty, "zcompare">])>;
0900   }
0901 
0902   multiclass AMDGPUSampleHelper_Clamp<string ucmod, string lcmod,
0903                                       list<AMDGPUArg> extra_addr> {
0904     defm NAME : AMDGPUSampleHelper_Compare<ucmod, lcmod, extra_addr>;
0905     let LodOrClamp = "clamp" in
0906     defm NAME : AMDGPUSampleHelper_Compare<ucmod#"_CL", lcmod#"_cl", extra_addr>;
0907   }
0908 
0909   defset list<AMDGPUSampleVariant> AMDGPUSampleVariantsNoGradients = {
0910     let UsesWQM = true in
0911     defm AMDGPUSample : AMDGPUSampleHelper_Clamp<"", "", []>;
0912     let Bias = true, UsesWQM = true in
0913     defm AMDGPUSample : AMDGPUSampleHelper_Clamp<
0914         "_B", "_b", [AMDGPUArg<llvm_anyfloat_ty, "bias">]>;
0915     let LodOrClamp = "lod" in
0916     defm AMDGPUSample : AMDGPUSampleHelper_Compare<"_L", "_l", []>;
0917     defm AMDGPUSample : AMDGPUSampleHelper_Compare<"_LZ", "_lz", []>;
0918   }
0919 
0920   let Gradients = true in {
0921     defm AMDGPUSample : AMDGPUSampleHelper_Clamp<"_D", "_d", []>;
0922     defm AMDGPUSample : AMDGPUSampleHelper_Clamp<"_CD", "_cd", []>;
0923   }
0924 }
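     // For example, the "_B" (bias) helper above expands through the
     // Offset/Compare/Clamp multiclasses into the variants AMDGPUSample_b, _b_o,
     // _c_b, _c_b_o, _b_cl, _b_cl_o, _c_b_cl and _c_b_cl_o.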
0925 
0926 // Helper class to capture the profile of a dimension-aware image intrinsic.
0927 // This information is used to generate the intrinsic's type and to inform
0928 // codegen pattern matching.
0929 class AMDGPUDimProfile<string opmod,
0930                        AMDGPUDimProps dim> {
0931   AMDGPUDimProps Dim = dim;
0932   string OpMod = opmod; // the corresponding instruction is named IMAGE_OpMod
0933 
0934   // These are intended to be overwritten by subclasses
0935   bit IsSample = false;
0936   bit IsAtomic = false;
0937   list<LLVMType> RetTypes = [];
0938   list<AMDGPUArg> DataArgs = [];
0939   list<AMDGPUArg> ExtraAddrArgs = [];
0940   bit Offset = false;
0941   bit Bias = false;
0942   bit ZCompare = false;
0943   bit Gradients = false;
0944   string LodClampMip = "";
0945 
0946   int NumRetAndDataAnyTypes =
0947     !foldl(0, !listconcat(RetTypes, !foreach(arg, DataArgs, arg.Type)), a, b,
0948            !add(a, b.isAny));
0949 
0950   list<AMDGPUArg> AddrArgs =
0951     arglistconcat<[ExtraAddrArgs,
0952                    !if(Gradients, dim.GradientArgs, []),
0953                    !listconcat(!if(IsSample, dim.CoordSliceArgs, dim.CoordSliceIntArgs),
0954                                !if(!empty(LodClampMip),
0955                                    []<AMDGPUArg>,
0956                                    [AMDGPUArg<LLVMMatchType<0>, LodClampMip>]))],
0957                   NumRetAndDataAnyTypes>.ret;
0958   list<LLVMType> AddrTypes = !foreach(arg, AddrArgs, arg.Type);
0959   list<AMDGPUArg> AddrDefaultArgs =
0960     !foreach(arg, AddrArgs,
0961              AMDGPUArg<!if(!or(arg.Type.isAny, !isa<LLVMMatchType>(arg.Type)),
0962                            !if(IsSample, llvm_float_ty, llvm_i32_ty), arg.Type),
0963                        arg.Name>);
0964   list<AMDGPUArg> AddrA16Args =
0965     !foreach(arg, AddrArgs,
0966              AMDGPUArg<!if(!or(arg.Type.isAny, !isa<LLVMMatchType>(arg.Type)),
0967                            !if(IsSample, llvm_half_ty, llvm_i16_ty), arg.Type),
0968                        arg.Name>);
0969 }
0970 
0971 class AMDGPUDimProfileCopy<AMDGPUDimProfile base> : AMDGPUDimProfile<base.OpMod, base.Dim> {
0972   let IsSample = base.IsSample;
0973   let IsAtomic = base.IsAtomic;
0974   let RetTypes = base.RetTypes;
0975   let DataArgs = base.DataArgs;
0976   let ExtraAddrArgs = base.ExtraAddrArgs;
0977   let Offset = base.Offset;
0978   let Bias = base.Bias;
0979   let ZCompare = base.ZCompare;
0980   let Gradients = base.Gradients;
0981   let LodClampMip = base.LodClampMip;
0982 }
0983 
0984 class AMDGPUDimSampleProfile<string opmod,
0985                              AMDGPUDimProps dim,
0986                              AMDGPUSampleVariant sample,
0987                              bit has_return = true> : AMDGPUDimProfile<opmod, dim> {
0988   let IsSample = true;
0989   let RetTypes = !if(has_return, [llvm_any_ty], []);
0990   let ExtraAddrArgs = sample.ExtraAddrArgs;
0991   let Offset = sample.Offset;
0992   let Bias = sample.Bias;
0993   let ZCompare = sample.ZCompare;
0994   let Gradients = sample.Gradients;
0995   let LodClampMip = sample.LodOrClamp;
0996 }
0997 
0998 class AMDGPUDimSampleNoReturnProfile<string opmod,
0999                              AMDGPUDimProps dim,
1000                              AMDGPUSampleVariant sample>
1001     : AMDGPUDimSampleProfile<opmod, dim, sample, false> {
1002 }
1003 
1004 class AMDGPUDimNoSampleProfile<string opmod,
1005                                AMDGPUDimProps dim,
1006                                list<LLVMType> retty,
1007                                list<AMDGPUArg> dataargs,
1008                                bit Mip = false> : AMDGPUDimProfile<opmod, dim> {
1009   let RetTypes = retty;
1010   let DataArgs = dataargs;
1011   let LodClampMip = !if(Mip, "mip", "");
1012 }
1013 
1014 class AMDGPUDimAtomicProfile<string opmod,
1015                              AMDGPUDimProps dim,
1016                              list<AMDGPUArg> dataargs,
1017                              LLVMType rettype> : AMDGPUDimProfile<opmod, dim> {
1018   let RetTypes = [rettype];
1019   let DataArgs = dataargs;
1020   let IsAtomic = true;
1021 }
1022 
1023 class AMDGPUDimGetResInfoProfile<AMDGPUDimProps dim>
1024     : AMDGPUDimProfile<"GET_RESINFO", dim> {
1025   let RetTypes = [llvm_anyfloat_ty];
1026   let DataArgs = [];
1027   let AddrArgs = [AMDGPUArg<llvm_anyint_ty, "mip">];
1028   let LodClampMip = "mip";
1029 }
1030 
1031 // Helper class for figuring out image intrinsic argument indexes.
1032 class AMDGPUImageDimIntrinsicEval<AMDGPUDimProfile P_> {
1033   int NumDataArgs = !size(P_.DataArgs);
1034   int NumDmaskArgs = !not(P_.IsAtomic);
1035   int NumOffsetArgs = !if(P_.Offset, 1, 0);
1036   int NumBiasArgs = !if(P_.Bias, 1, 0);
1037   int NumZCompareArgs = !if(P_.ZCompare, 1, 0);
1038   int NumExtraAddrArgs = !add(NumOffsetArgs, NumBiasArgs, NumZCompareArgs);
1039   int NumVAddrArgs = !size(P_.AddrArgs);
1040   int NumGradientArgs = !if(P_.Gradients, !size(P_.Dim.GradientArgs), 0);
1041   int NumCoordArgs = !if(P_.IsSample, !size(P_.Dim.CoordSliceArgs), !size(P_.Dim.CoordSliceIntArgs));
1042   int NumRSrcArgs = 1;
1043   int NumSampArgs = !if(P_.IsSample, 2, 0);
1044   int DmaskArgIndex = NumDataArgs;
1045   int VAddrArgIndex = !add(DmaskArgIndex, NumDmaskArgs);
1046   int OffsetArgIndex = VAddrArgIndex;
1047   int BiasArgIndex = !add(VAddrArgIndex, NumOffsetArgs);
1048   int ZCompareArgIndex = !add(BiasArgIndex, NumBiasArgs);
1049   int GradientArgIndex = !add(VAddrArgIndex, NumExtraAddrArgs);
1050   int CoordArgIndex = !add(GradientArgIndex, NumGradientArgs);
1051   int LodArgIndex = !add(VAddrArgIndex, NumVAddrArgs, -1);
1052   int MipArgIndex = LodArgIndex;
1053   int RsrcArgIndex = !add(VAddrArgIndex, NumVAddrArgs);
1054   int SampArgIndex = !add(RsrcArgIndex, NumRSrcArgs);
1055   int UnormArgIndex = !add(SampArgIndex, 1);
1056   int TexFailCtrlArgIndex = !add(SampArgIndex, NumSampArgs);
1057   int CachePolicyArgIndex = !add(TexFailCtrlArgIndex, 1);
1058 }
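     // Worked example: for a plain 2D sample profile (no data args, one dmask
     // arg, coordinates s and t) the evaluation gives DmaskArgIndex = 0,
     // VAddrArgIndex = 1, RsrcArgIndex = 3, SampArgIndex = 4, UnormArgIndex = 5,
     // TexFailCtrlArgIndex = 6 and CachePolicyArgIndex = 7.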
1059 
1060 // All dimension-aware intrinsics are derived from this class.
1061 class AMDGPUImageDimIntrinsic<AMDGPUDimProfile P_,
1062                               list<IntrinsicProperty> props,
1063                               list<SDNodeProperty> sdnodeprops> : Intrinsic<
1064     P_.RetTypes,        // vdata(VGPR) -- for load/atomic-with-return
1065     !listconcat(
1066       !foreach(arg, P_.DataArgs, arg.Type),    // vdata(VGPR) -- for store/atomic
1067       !if(P_.IsAtomic, [], [llvm_i32_ty]),     // dmask(imm)
1068       P_.AddrTypes,                            // vaddr(VGPR)
1069       [llvm_any_ty],                           // rsrc(SGPR); Valid types: v4i32 and v8i32
1070       !if(P_.IsSample, [llvm_any_ty,           // samp(SGPR);
1071                         llvm_i1_ty], []),      // unorm(imm)
1072       [llvm_i32_ty,                            // texfailctrl(imm; bit 0 = tfe, bit 1 = lwe)
1073        llvm_i32_ty]),                          // auxiliary/cachepolicy(imm):
1074                                                //                bit 0 = glc, bit 1 = slc,
1075                                                //                bit 2 = dlc (gfx10/gfx11),
1076                                                //                bit 4 = scc (gfx90a)
1077                                                //        gfx940: bit 0 = sc0, bit 1 = nt, bit 4 = sc1
1078                                                //        gfx12+: bits [0-2] = th, bits [3-4] = scope
1079      !listconcat(props, [IntrNoCallback, IntrNoFree, IntrWillReturn],
1080           !if(P_.IsAtomic, [], [ImmArg<ArgIndex<AMDGPUImageDimIntrinsicEval<P_>.DmaskArgIndex>>]),
1081           !if(P_.IsSample, [ImmArg<ArgIndex<AMDGPUImageDimIntrinsicEval<P_>.UnormArgIndex>>], []),
1082           [ImmArg<ArgIndex<AMDGPUImageDimIntrinsicEval<P_>.TexFailCtrlArgIndex>>,
1083            ImmArg<ArgIndex<AMDGPUImageDimIntrinsicEval<P_>.CachePolicyArgIndex>>],
1084           !if(P_.IsAtomic, [], [IntrNoSync])),
1085 
1086 
1087       "", sdnodeprops>,
1088   AMDGPURsrcIntrinsic<!add(!size(P_.DataArgs), !size(P_.AddrTypes),
1089                            !if(P_.IsAtomic, 0, 1)), 1> {
1090   AMDGPUDimProfile P = P_;
1091 
1092   AMDGPUImageDimIntrinsic Intr = !cast<AMDGPUImageDimIntrinsic>(NAME);
1093 
1094   let TargetPrefix = "amdgcn";
1095 }
1096 
1097 // Marker class for intrinsics with a DMask that determines the returned
1098 // channels.
1099 class AMDGPUImageDMaskIntrinsic;
1100 
1101 defset list<AMDGPUImageDimIntrinsic> AMDGPUImageDimIntrinsics = {
1102 
1103   //////////////////////////////////////////////////////////////////////////
1104   // Load and store intrinsics
1105   //////////////////////////////////////////////////////////////////////////
1106   multiclass AMDGPUImageDimIntrinsicsNoMsaa<string opmod,
1107                                             list<LLVMType> retty,
1108                                             list<AMDGPUArg> dataargs,
1109                                             list<IntrinsicProperty> props,
1110                                             list<SDNodeProperty> sdnodeprops,
1111                                             bit Mip = false> {
1112     foreach dim = AMDGPUDims.NoMsaa in {
1113       def !strconcat(NAME, "_", dim.Name)
1114         : AMDGPUImageDimIntrinsic<
1115             AMDGPUDimNoSampleProfile<opmod, dim, retty, dataargs, Mip>,
1116             props, sdnodeprops>;
1117     }
1118   }
1119 
1120   multiclass AMDGPUImageDimIntrinsicsAll<string opmod,
1121                                          list<LLVMType> retty,
1122                                          list<AMDGPUArg> dataargs,
1123                                          list<IntrinsicProperty> props,
1124                                          list<SDNodeProperty> sdnodeprops,
1125                                          bit Mip = false> {
1126     foreach dim = AMDGPUDims.All in {
1127       def !strconcat(NAME, "_", dim.Name)
1128         : AMDGPUImageDimIntrinsic<
1129             AMDGPUDimNoSampleProfile<opmod, dim, retty, dataargs, Mip>,
1130             props, sdnodeprops>;
1131     }
1132   }
1133 
1134   defm int_amdgcn_image_load
1135     : AMDGPUImageDimIntrinsicsAll<"LOAD", [llvm_any_ty], [], [IntrReadMem],
1136                                   [SDNPMemOperand]>,
1137       AMDGPUImageDMaskIntrinsic;
1138   defm int_amdgcn_image_load_mip
1139     : AMDGPUImageDimIntrinsicsNoMsaa<"LOAD_MIP", [llvm_any_ty], [],
1140                                      [IntrReadMem, IntrWillReturn], [SDNPMemOperand], 1>,
1141       AMDGPUImageDMaskIntrinsic;
1142 
1143   defm int_amdgcn_image_store : AMDGPUImageDimIntrinsicsAll<
1144               "STORE", [], [AMDGPUArg<llvm_anyfloat_ty, "vdata">],
1145               [IntrWriteMem, IntrWillReturn], [SDNPMemOperand]>,
1146               AMDGPUImageDMaskIntrinsic;
1147   defm int_amdgcn_image_store_mip : AMDGPUImageDimIntrinsicsNoMsaa<
1148               "STORE_MIP", [], [AMDGPUArg<llvm_anyfloat_ty, "vdata">],
1149               [IntrWriteMem, IntrWillReturn], [SDNPMemOperand], 1>,
1150               AMDGPUImageDMaskIntrinsic;
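// Illustrative IR (editorial addition, assuming a <8 x i32> descriptor %rsrc and i32
// coordinates; not part of the upstream file):
//   %texel = call <4 x float> @llvm.amdgcn.image.load.2d.v4f32.i32(
//                i32 15, i32 %s, i32 %t, <8 x i32> %rsrc, i32 0, i32 0)
//   call void @llvm.amdgcn.image.store.2d.v4f32.i32(
//       <4 x float> %texel, i32 15, i32 %s, i32 %t, <8 x i32> %rsrc, i32 0, i32 0)
// dmask = 15 selects all four channels; the trailing zeros are texfailctrl and
// auxiliary/cachepolicy as documented above.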
1151 
1152   //////////////////////////////////////////////////////////////////////////
1153   // MSAA intrinsics
1154   //////////////////////////////////////////////////////////////////////////
1155   foreach dim = AMDGPUDims.Msaa in {
1156     def int_amdgcn_image_msaa_load_x # _ # dim.Name:
1157         AMDGPUImageDimIntrinsic<
1158             AMDGPUDimNoSampleProfile<"MSAA_LOAD_X", dim, [llvm_any_ty], []>,
1159             [IntrReadMem], [SDNPMemOperand]>;
1160   }
1161 
1162   foreach dim = AMDGPUDims.Msaa in {
1163     def int_amdgcn_image_msaa_load # _ # dim.Name:
1164         AMDGPUImageDimIntrinsic<
1165             AMDGPUDimNoSampleProfile<"MSAA_LOAD", dim, [llvm_any_ty], []>,
1166             [IntrReadMem], [SDNPMemOperand]>;
1167   }
1168 
1169   //////////////////////////////////////////////////////////////////////////
1170   // sample and getlod intrinsics
1171   //////////////////////////////////////////////////////////////////////////
1172   multiclass AMDGPUImageDimSampleDims<string opmod,
1173                                       AMDGPUSampleVariant sample,
1174                                       bit NoMem = false> {
1175     foreach dim = AMDGPUDims.NoMsaa in {
1176       def !strconcat(NAME, "_", dim.Name) : AMDGPUImageDimIntrinsic<
1177           AMDGPUDimSampleProfile<opmod, dim, sample>,
1178           !listconcat(!if(NoMem, [IntrNoMem], [IntrReadMem]),
1179                       !if(sample.UsesWQM, [IntrConvergent], [])),
1180           !if(NoMem, [], [SDNPMemOperand])>;
1181     }
1182   }
1183 
1184   foreach sample = AMDGPUSampleVariants in {
1185     defm int_amdgcn_image_sample # sample.LowerCaseMod
1186       : AMDGPUImageDimSampleDims<"SAMPLE" # sample.UpperCaseMod, sample>,
1187         AMDGPUImageDMaskIntrinsic;
1188   }
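// Illustrative IR (editorial addition; %rsrc is an image descriptor and %samp a
// <4 x i32> sampler descriptor, both assumed):
//   %v = call <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(
//            i32 15, float %s, float %t, <8 x i32> %rsrc, <4 x i32> %samp,
//            i1 false, i32 0, i32 0)
// The i1 false is the unorm flag; the last two operands are texfailctrl and cachepolicy.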
1189 
1190   multiclass AMDGPUImageDimSampleNoReturnDims<string opmod,
1191                                       AMDGPUSampleVariant sample> {
1192     foreach dim = AMDGPUDims.NoMsaa in {
1193       def !strconcat(NAME, "_", dim.Name, "_nortn") : AMDGPUImageDimIntrinsic<
1194           AMDGPUDimSampleNoReturnProfile<opmod, dim, sample>,
1195           !listconcat([IntrWillReturn], !if(sample.UsesWQM, [IntrConvergent], [])),
1196           [SDNPMemOperand]>;
1197     }
1198   }
1199   foreach sample = AMDGPUSampleVariants in {
1200     defm int_amdgcn_image_sample # sample.LowerCaseMod
1201       : AMDGPUImageDimSampleNoReturnDims<
1202         "SAMPLE" # sample.UpperCaseMod # "_nortn", sample>,
1203         AMDGPUImageDMaskIntrinsic;
1204   }
1205 
1206   defm int_amdgcn_image_getlod
1207     : AMDGPUImageDimSampleDims<"GET_LOD", AMDGPUSample, 1>,
1208       AMDGPUImageDMaskIntrinsic;
1209 
1210   //////////////////////////////////////////////////////////////////////////
1211   // getresinfo intrinsics
1212   //////////////////////////////////////////////////////////////////////////
1213   foreach dim = AMDGPUDims.All in {
1214     def !strconcat("int_amdgcn_image_getresinfo_", dim.Name)
1215       : AMDGPUImageDimIntrinsic<AMDGPUDimGetResInfoProfile<dim>, [IntrNoMem], []>,
1216         AMDGPUImageDMaskIntrinsic;
1217   }
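// Illustrative IR (editorial addition, operands assumed): query the size of mip level
// %mip of a 2D resource.
//   %info = call <4 x float> @llvm.amdgcn.image.getresinfo.2d.v4f32.i32(
//               i32 15, i32 %mip, <8 x i32> %rsrc, i32 0, i32 0)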
1218 
1219   //////////////////////////////////////////////////////////////////////////
1220   // gather4 intrinsics
1221   //////////////////////////////////////////////////////////////////////////
1222   foreach sample = AMDGPUSampleVariantsNoGradients in {
1223     foreach dim = [AMDGPUDim2D, AMDGPUDimCube, AMDGPUDim2DArray] in {
1224       def int_amdgcn_image_gather4 # sample.LowerCaseMod # _ # dim.Name:
1225           AMDGPUImageDimIntrinsic<
1226               AMDGPUDimSampleProfile<"GATHER4" # sample.UpperCaseMod, dim, sample>,
1227               [IntrReadMem], [SDNPMemOperand]>;
1228     }
1229   }
1230 }
1231 
1232 //////////////////////////////////////////////////////////////////////////
1233 // atomic intrinsics
1234 //////////////////////////////////////////////////////////////////////////
1235 defset list<AMDGPUImageDimIntrinsic> AMDGPUImageDimAtomicIntrinsics = {
1236   multiclass AMDGPUImageDimAtomicX<string opmod, list<AMDGPUArg> dataargs,
1237                                    LLVMType rettype = llvm_anyint_ty> {
1238         foreach dim = AMDGPUDims.All in {
1239           def !strconcat(NAME, "_", dim.Name):
1240             AMDGPUImageDimIntrinsic<AMDGPUDimAtomicProfile<opmod, dim, dataargs, rettype>,
1241             [], [SDNPMemOperand]>;
1242         }
1243   }
1244 
1245   multiclass AMDGPUImageDimAtomic<string opmod, LLVMType rettype = llvm_anyint_ty> :
1246     AMDGPUImageDimAtomicX<opmod, [AMDGPUArg<LLVMMatchType<0>, "vdata">], rettype>;
1247 
1248   multiclass AMDGPUImageDimFloatAtomic<string opmod> :
1249     AMDGPUImageDimAtomic<opmod, llvm_anyfloat_ty>;
1250 
1251   multiclass AMDGPUImageDimAnyAtomic<string opmod> :
1252     AMDGPUImageDimAtomic<opmod, llvm_any_ty>;
1253 
1254   defm int_amdgcn_image_atomic_swap : AMDGPUImageDimAnyAtomic<"ATOMIC_SWAP">;
1255   defm int_amdgcn_image_atomic_add : AMDGPUImageDimAtomic<"ATOMIC_ADD">;
1256   defm int_amdgcn_image_atomic_sub : AMDGPUImageDimAtomic<"ATOMIC_SUB">;
1257   defm int_amdgcn_image_atomic_smin : AMDGPUImageDimAtomic<"ATOMIC_SMIN">;
1258   defm int_amdgcn_image_atomic_umin : AMDGPUImageDimAtomic<"ATOMIC_UMIN">;
1259   defm int_amdgcn_image_atomic_fmin : AMDGPUImageDimFloatAtomic<"ATOMIC_FMIN">;
1260   defm int_amdgcn_image_atomic_smax : AMDGPUImageDimAtomic<"ATOMIC_SMAX">;
1261   defm int_amdgcn_image_atomic_umax : AMDGPUImageDimAtomic<"ATOMIC_UMAX">;
1262   defm int_amdgcn_image_atomic_fmax : AMDGPUImageDimFloatAtomic<"ATOMIC_FMAX">;
1263   defm int_amdgcn_image_atomic_and : AMDGPUImageDimAtomic<"ATOMIC_AND">;
1264   defm int_amdgcn_image_atomic_or : AMDGPUImageDimAtomic<"ATOMIC_OR">;
1265   defm int_amdgcn_image_atomic_xor : AMDGPUImageDimAtomic<"ATOMIC_XOR">;
1266   defm int_amdgcn_image_atomic_inc : AMDGPUImageDimAtomic<"ATOMIC_INC">;
1267   defm int_amdgcn_image_atomic_dec : AMDGPUImageDimAtomic<"ATOMIC_DEC">;
1268   defm int_amdgcn_image_atomic_add_flt : AMDGPUImageDimFloatAtomic<"ATOMIC_ADD_FLT">;
1269   defm int_amdgcn_image_atomic_min_flt : AMDGPUImageDimFloatAtomic<"ATOMIC_MIN_FLT">;
1270   defm int_amdgcn_image_atomic_max_flt : AMDGPUImageDimFloatAtomic<"ATOMIC_MAX_FLT">;
1271 
1272   defm int_amdgcn_image_atomic_cmpswap :
1273       AMDGPUImageDimAtomicX<"ATOMIC_CMPSWAP", [AMDGPUArg<LLVMMatchType<0>, "src">,
1274                                                AMDGPUArg<LLVMMatchType<0>, "cmp">]>;
1275 
1276   defm int_amdgcn_image_atomic_pk_add_f16 : AMDGPUImageDimFloatAtomic<"ATOMIC_PK_ADD_F16">;
1277   defm int_amdgcn_image_atomic_pk_add_bf16 : AMDGPUImageDimFloatAtomic<"ATOMIC_PK_ADD_BF16">;
1278 }
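// Illustrative IR (editorial addition, operands assumed): image atomics take no dmask,
// so the data operand comes first, followed by the coordinates and the descriptor;
// the pre-operation value is returned.
//   %old = call i32 @llvm.amdgcn.image.atomic.add.2d.i32.i32(
//              i32 %val, i32 %s, i32 %t, <8 x i32> %rsrc, i32 0, i32 0)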
1279 
1280 //////////////////////////////////////////////////////////////////////////
1281 // Buffer intrinsics
1282 //////////////////////////////////////////////////////////////////////////
1283 
1284 // Data type for buffer resources (V#). Maybe, in the future, we can create a
1285 // similar one for textures (T#).
1286 def AMDGPUBufferRsrcTy : LLVMQualPointerType<8>;
1287 
1288 let TargetPrefix = "amdgcn" in {
1289 
1290 def int_amdgcn_make_buffer_rsrc : DefaultAttrsIntrinsic <
1291   [AMDGPUBufferRsrcTy],
1292   [llvm_anyptr_ty, // base
1293    llvm_i16_ty,    // stride (and swizzle control)
1294    llvm_i32_ty,    // NumRecords / extent
1295    llvm_i32_ty],   // flags
1296   // Attributes lifted from ptrmask + some extra argument attributes.
1297   [IntrNoMem, ReadNone<ArgIndex<0>>,
1298    IntrSpeculatable, IntrWillReturn]>;
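// Illustrative IR (editorial addition): building a V# from a flat pointer. The name
// suffix mangles the overloaded base-pointer type; .p0 below is an assumption for an
// addrspace(0) base.
//   %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p0(
//               ptr %base, i16 0, i32 %num.records, i32 0)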
1299 
1300 defset list<AMDGPURsrcIntrinsic> AMDGPUBufferIntrinsics = {
1301 
1302 // Generate a buffer_load instruction that may be optimized to s_buffer_load if
1303 // the offset argument is uniform.
1304 def int_amdgcn_s_buffer_load : DefaultAttrsIntrinsic <
1305   [llvm_any_ty],
1306   [llvm_v4i32_ty,    // rsrc(SGPR)
1307    llvm_i32_ty,      // byte offset
1308    llvm_i32_ty],     // auxiliary/cachepolicy(imm):
1309                      //                bit 0 = glc, bit 1 = slc, bit 2 = dlc (gfx10/gfx11),
1310                      //                bit 3 = swz, bit 4 = scc (gfx90a)
1311                      //        gfx940: bit 0 = sc0, bit 1 = nt, bit 3 = swz, bit 4 = sc1
1312                      //        gfx12+: bits [0-2] = th, bits [3-4] = scope,
1313                      //                bit 6 = swz
1314                      // Note: volatile bit is **not** permitted here.
1315   [IntrNoMem, ImmArg<ArgIndex<2>>]>,
1316   AMDGPURsrcIntrinsic<0>;
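// Illustrative IR (editorial addition, operands assumed): a uniform-offset load that
// the backend may select to s_buffer_load_dword.
//   %v = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %rsrc, i32 %byte.off, i32 0)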
1317 
1318 // Buffer intrinsics with separate raw and struct variants.  The raw
1319 // variant never has an index. The struct variant always has an index, even if
1320 // it is const 0. A struct intrinsic with constant 0 index is different to the
1321 // corresponding raw intrinsic on gfx9+ because the behavior of bound checking
1322 // and swizzling changes depending on whether idxen is set in the instruction.
1323 // These intrinsics also keep the offset and soffset arguments separate as
1324 // they behave differently in bounds checking and swizzling.
1325 
1326 // The versions of these intrinsics that take <4 x i32> arguments are deprecated
1327 // in favor of their .ptr.buffer variants that take ptr addrspace(8) arguments,
1328 // which allow for improved reasoning about memory accesses.
1329 //
1330 // Note that in the cachepolicy for all these intrinsics, bit 31 is not preserved
1331 // through to final assembly selection and is used to signal that the buffer
1332 // operation is volatile.
1333 class AMDGPURawBufferLoad : DefaultAttrsIntrinsic <
1334   [llvm_any_ty],
1335   [llvm_v4i32_ty,    // rsrc(SGPR)
1336    llvm_i32_ty,      // offset(VGPR/imm, included in bounds checking and swizzling)
1337    llvm_i32_ty,      // soffset(SGPR/imm, excluded from bounds checking and swizzling)
1338    llvm_i32_ty],     // auxiliary/cachepolicy(imm):
1339                      //                bit 0 = glc, bit 1 = slc, bit 2 = dlc (gfx10/gfx11),
1340                      //                bit 3 = swz, bit 4 = scc (gfx90a)
1341                      //        gfx940: bit 0 = sc0, bit 1 = nt, bit 3 = swz, bit 4 = sc1
1342                      //        gfx12+: bits [0-2] = th, bits [3-4] = scope,
1343                      //                bit 6 = swz
1344                      //           all: volatile op (bit 31, stripped at lowering)
1345   [IntrReadMem, ImmArg<ArgIndex<3>>], "", [SDNPMemOperand]>,
1346   AMDGPURsrcIntrinsic<0>;
1347 def int_amdgcn_raw_buffer_load_format : AMDGPURawBufferLoad;
1348 def int_amdgcn_raw_buffer_load : AMDGPURawBufferLoad;
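// Illustrative IR (editorial addition, operands assumed): load one dword at %voffset
// with the default cache policy.
//   %v = call float @llvm.amdgcn.raw.buffer.load.f32(
//            <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)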
1349 
1350 class AMDGPURawAtomicBufferLoad<LLVMType data_ty = llvm_any_ty> : Intrinsic <
1351   [data_ty],
1352   [llvm_v4i32_ty,     // rsrc(SGPR)
1353    llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
1354    llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
1355    llvm_i32_ty],      // auxiliary data (imm, cachepolicy     (bit 0 = glc,
1356                       //                                       bit 1 = slc,
1357                       //                                       bit 2 = dlc on gfx10+),
1358                       //                      swizzled buffer (bit 3 = swz))
1359   [ImmArg<ArgIndex<3>>, IntrWillReturn, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>,
1360   AMDGPURsrcIntrinsic<0>;
1361 def int_amdgcn_raw_atomic_buffer_load : AMDGPURawAtomicBufferLoad;
1362 
1363 class AMDGPURawPtrBufferLoad<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
1364   [data_ty],
1365   [AMDGPUBufferRsrcTy,    // rsrc(SGPR)
1366    llvm_i32_ty,           // offset(VGPR/imm, included in bounds checking and swizzling)
1367    llvm_i32_ty,           // soffset(SGPR/imm, excluded from bounds checking and swizzling)
1368    llvm_i32_ty],          // auxiliary/cachepolicy(imm):
1369                           //                bit 0 = glc, bit 1 = slc, bit 2 = dlc (gfx10/gfx11),
1370                           //                bit 3 = swz, bit 4 = scc (gfx90a)
1371                           //        gfx940: bit 0 = sc0, bit 1 = nt, bit 3 = swz, bit 4 = sc1
1372                           //        gfx12+: bits [0-2] = th, bits [3-4] = scope,
1373                           //                bit 6 = swz
1374                           //           all: volatile op (bit 31, stripped at lowering)
1375   [IntrArgMemOnly, IntrReadMem, ReadOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>,
1376   ImmArg<ArgIndex<3>>], "", [SDNPMemOperand]>,
1377   AMDGPURsrcIntrinsic<0>;
1378 def int_amdgcn_raw_ptr_buffer_load_format : AMDGPURawPtrBufferLoad<llvm_anyfloat_ty>;
1379 def int_amdgcn_raw_ptr_buffer_load : AMDGPURawPtrBufferLoad;
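// Illustrative IR (editorial addition, operands assumed): the pointer form takes the
// V# as a ptr addrspace(8) operand, which lets alias analysis reason about the access.
//   %v = call float @llvm.amdgcn.raw.ptr.buffer.load.f32(
//            ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 0)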
1380 
1381 class AMDGPURawPtrAtomicBufferLoad<LLVMType data_ty = llvm_any_ty> : Intrinsic <
1382   [data_ty],
1383   [AMDGPUBufferRsrcTy,// rsrc(SGPR)
1384    llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
1385    llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
1386    llvm_i32_ty],      // auxiliary data (imm, cachepolicy     (bit 0 = glc,
1387                       //                                       bit 1 = slc,
1388                       //                                       bit 2 = dlc on gfx10+),
1389                       //                      swizzled buffer (bit 3 = swz))
1390   [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<3>>, IntrWillReturn, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>,
1391   AMDGPURsrcIntrinsic<0>;
1392 def int_amdgcn_raw_ptr_atomic_buffer_load : AMDGPURawPtrAtomicBufferLoad;
1393 
1394 class AMDGPUStructBufferLoad<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
1395   [data_ty],
1396   [llvm_v4i32_ty,    // rsrc(SGPR)
1397    llvm_i32_ty,      // vindex(VGPR)
1398    llvm_i32_ty,      // offset(VGPR/imm, included in bounds checking and swizzling)
1399    llvm_i32_ty,      // soffset(SGPR/imm, excluded from bounds checking and swizzling)
1400    llvm_i32_ty],     // auxiliary/cachepolicy(imm):
1401                      //                bit 0 = glc, bit 1 = slc, bit 2 = dlc (gfx10/gfx11),
1402                      //                bit 3 = swz, bit 4 = scc (gfx90a)
1403                      //        gfx940: bit 0 = sc0, bit 1 = nt, bit 3 = swz, bit 4 = sc1
1404                      //        gfx12+: bits [0-2] = th, bits [3-4] = scope,
1405                      //                bit 6 = swz
1406                      //           all: volatile op (bit 31, stripped at lowering)
1407   [IntrReadMem, ImmArg<ArgIndex<4>>], "", [SDNPMemOperand]>,
1408   AMDGPURsrcIntrinsic<0>;
1409 def int_amdgcn_struct_buffer_load_format : AMDGPUStructBufferLoad;
1410 def int_amdgcn_struct_buffer_load : AMDGPUStructBufferLoad;
1411 
1412 class AMDGPUStructAtomicBufferLoad<LLVMType data_ty = llvm_any_ty> : Intrinsic <
1413   [data_ty],
1414   [llvm_v4i32_ty,    // rsrc(SGPR)
1415    llvm_i32_ty,      // vindex(VGPR)
1416    llvm_i32_ty,      // offset(VGPR/imm, included in bounds checking and swizzling)
1417    llvm_i32_ty,      // soffset(SGPR/imm, excluded from bounds checking and swizzling)
1418    llvm_i32_ty],     // auxiliary/cachepolicy(imm):
1419                      //                bit 0 = glc, bit 1 = slc, bit 2 = dlc (gfx10/gfx11),
1420                      //                bit 3 = swz, bit 4 = scc (gfx90a)
1421                      //        gfx940: bit 0 = sc0, bit 1 = nt, bit 3 = swz, bit 4 = sc1
1422                      //        gfx12+: bits [0-2] = th, bits [3-4] = scope,
1423                      //                bit 6 = swz
1424                      //           all: volatile op (bit 31, stripped at lowering)
1425   [ImmArg<ArgIndex<4>>, IntrWillReturn, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>,
1426   AMDGPURsrcIntrinsic<0>;
1427 def int_amdgcn_struct_atomic_buffer_load : AMDGPUStructAtomicBufferLoad;
1428 
1429 class AMDGPUStructPtrBufferLoad<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
1430   [data_ty],
1431   [AMDGPUBufferRsrcTy,    // rsrc(SGPR)
1432    llvm_i32_ty,           // vindex(VGPR)
1433    llvm_i32_ty,           // offset(VGPR/imm, included in bounds checking and swizzling)
1434    llvm_i32_ty,           // soffset(SGPR/imm, excluded from bounds checking and swizzling)
1435    llvm_i32_ty],          // auxiliary/cachepolicy(imm):
1436                           //                bit 0 = glc, bit 1 = slc, bit 2 = dlc (gfx10/gfx11),
1437                           //                bit 3 = swz, bit 4 = scc (gfx90a)
1438                           //        gfx940: bit 0 = sc0, bit 1 = nt, bit 3 = swz, bit 4 = sc1
1439                           //        gfx12+: bits [0-2] = th, bits [3-4] = scope,
1440                           //                bit 6 = swz
1441                           //           all: volatile op (bit 31, stripped at lowering)
1442   [IntrArgMemOnly, IntrReadMem, ReadOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>,
1443    ImmArg<ArgIndex<4>>], "", [SDNPMemOperand]>,
1444   AMDGPURsrcIntrinsic<0>;
1445 def int_amdgcn_struct_ptr_buffer_load_format : AMDGPUStructPtrBufferLoad;
1446 def int_amdgcn_struct_ptr_buffer_load : AMDGPUStructPtrBufferLoad;
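// Illustrative IR (editorial addition, operands assumed): the struct form adds a
// vindex operand ahead of the byte offset.
//   %v = call <4 x float> @llvm.amdgcn.struct.ptr.buffer.load.v4f32(
//            ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)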
1447 
1448 class AMDGPUStructPtrAtomicBufferLoad<LLVMType data_ty = llvm_any_ty> : Intrinsic <
1449   [data_ty],
1450   [AMDGPUBufferRsrcTy,    // rsrc(SGPR)
1451    llvm_i32_ty,           // vindex(VGPR)
1452    llvm_i32_ty,           // offset(VGPR/imm, included in bounds checking and swizzling)
1453    llvm_i32_ty,           // soffset(SGPR/imm, excluded from bounds checking and swizzling)
1454    llvm_i32_ty],          // auxiliary/cachepolicy(imm):
1455                           //                bit 0 = glc, bit 1 = slc, bit 2 = dlc (gfx10/gfx11),
1456                           //                bit 3 = swz, bit 4 = scc (gfx90a)
1457                           //        gfx940: bit 0 = sc0, bit 1 = nt, bit 3 = swz, bit 4 = sc1
1458                           //        gfx12+: bits [0-2] = th, bits [3-4] = scope,
1459                           //                bit 6 = swz
1460                           //           all: volatile op (bit 31, stripped at lowering)
1461   [IntrArgMemOnly, NoCapture<ArgIndex<0>>,
1462    ImmArg<ArgIndex<4>>, IntrWillReturn, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>,
1463   AMDGPURsrcIntrinsic<0>;
1464 def int_amdgcn_struct_ptr_atomic_buffer_load : AMDGPUStructPtrAtomicBufferLoad;
1465 
1466 class AMDGPURawBufferStore<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
1467   [],
1468   [data_ty,          // vdata(VGPR)
1469    llvm_v4i32_ty,    // rsrc(SGPR)
1470    llvm_i32_ty,      // offset(VGPR/imm, included in bounds checking and swizzling)
1471    llvm_i32_ty,      // soffset(SGPR/imm, excluded from bounds checking and swizzling)
1472    llvm_i32_ty],     // auxiliary/cachepolicy(imm):
1473                      //                bit 0 = glc, bit 1 = slc, bit 2 = dlc (gfx10/gfx11),
1474                      //                bit 3 = swz, bit 4 = scc (gfx90a)
1475                      //        gfx940: bit 0 = sc0, bit 1 = nt, bit 3 = swz, bit 4 = sc1
1476                      //        gfx12+: bits [0-2] = th, bits [3-4] = scope,
1477                      //                bit 6 = swz
1478                      //           all: volatile op (bit 31, stripped at lowering)
1479   [IntrWriteMem, ImmArg<ArgIndex<4>>], "", [SDNPMemOperand]>,
1480   AMDGPURsrcIntrinsic<1>;
1481 def int_amdgcn_raw_buffer_store_format : AMDGPURawBufferStore<llvm_anyfloat_ty>;
1482 def int_amdgcn_raw_buffer_store : AMDGPURawBufferStore;
1483 
1484 class AMDGPURawPtrBufferStore<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
1485   [],
1486   [data_ty,               // vdata(VGPR)
1487    AMDGPUBufferRsrcTy,    // rsrc(SGPR)
1488    llvm_i32_ty,           // offset(VGPR/imm, included in bounds checking and swizzling)
1489    llvm_i32_ty,           // soffset(SGPR/imm, excluded from bounds checking and swizzling)
1490    llvm_i32_ty],          // auxiliary/cachepolicy(imm):
1491                           //                bit 0 = glc, bit 1 = slc, bit 2 = dlc (gfx10/gfx11),
1492                           //                bit 3 = swz, bit 4 = scc (gfx90a)
1493                           //        gfx940: bit 0 = sc0, bit 1 = nt, bit 3 = swz, bit 4 = sc1
1494                           //        gfx12+: bits [0-2] = th, bits [3-4] = scope,
1495                           //                bit 6 = swz
1496                           //           all: volatile op (bit 31, stripped at lowering)
1497   [IntrArgMemOnly, IntrWriteMem, WriteOnly<ArgIndex<1>>, NoCapture<ArgIndex<1>>,
1498   ImmArg<ArgIndex<4>>], "", [SDNPMemOperand]>,
1499   AMDGPURsrcIntrinsic<1>;
1500 def int_amdgcn_raw_ptr_buffer_store_format : AMDGPURawPtrBufferStore<llvm_anyfloat_ty>;
1501 def int_amdgcn_raw_ptr_buffer_store : AMDGPURawPtrBufferStore;
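// Illustrative IR (editorial addition, operands assumed): store one dword through the
// buffer-resource pointer.
//   call void @llvm.amdgcn.raw.ptr.buffer.store.f32(
//       float %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 0)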
1502 
1503 class AMDGPUStructBufferStore<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
1504   [],
1505   [data_ty,          // vdata(VGPR)
1506    llvm_v4i32_ty,    // rsrc(SGPR)
1507    llvm_i32_ty,      // vindex(VGPR)
1508    llvm_i32_ty,      // offset(VGPR/imm, included in bounds checking and swizzling)
1509    llvm_i32_ty,      // soffset(SGPR/imm, excluded from bounds checking and swizzling)
1510    llvm_i32_ty],     // auxiliary/cachepolicy(imm):
1511                      //                bit 0 = glc, bit 1 = slc, bit 2 = dlc (gfx10/gfx11),
1512                      //                bit 3 = swz, bit 4 = scc (gfx90a)
1513                      //        gfx940: bit 0 = sc0, bit 1 = nt, bit 3 = swz, bit 4 = sc1
1514                      //        gfx12+: bits [0-2] = th, bits [3-4] = scope,
1515                      //                bit 6 = swz
1516                      //           all: volatile op (bit 31, stripped at lowering)
1517   [IntrWriteMem, ImmArg<ArgIndex<5>>], "", [SDNPMemOperand]>,
1518   AMDGPURsrcIntrinsic<1>;
1519 def int_amdgcn_struct_buffer_store_format : AMDGPUStructBufferStore;
1520 def int_amdgcn_struct_buffer_store : AMDGPUStructBufferStore;
1521 
1522 class AMDGPUStructPtrBufferStore<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
1523   [],
1524   [data_ty,               // vdata(VGPR)
1525    AMDGPUBufferRsrcTy,    // rsrc(SGPR)
1526    llvm_i32_ty,           // vindex(VGPR)
1527    llvm_i32_ty,           // offset(VGPR/imm, included in bounds checking and swizzling)
1528    llvm_i32_ty,           // soffset(SGPR/imm, excluded from bounds checking and swizzling)
1529    llvm_i32_ty],          // auxiliary/cachepolicy(imm):
1530                           //                bit 0 = glc, bit 1 = slc, bit 2 = dlc (gfx10/gfx11),
1531                           //                bit 3 = swz, bit 4 = scc (gfx90a)
1532                           //        gfx940: bit 0 = sc0, bit 1 = nt, bit 3 = swz, bit 4 = sc1
1533                           //        gfx12+: bits [0-2] = th, bits [3-4] = scope,
1534                           //                bit 6 = swz
1535                           //           all: volatile op (bit 31, stripped at lowering)
1536   [IntrArgMemOnly, IntrWriteMem, WriteOnly<ArgIndex<1>>, NoCapture<ArgIndex<1>>,
1537    ImmArg<ArgIndex<5>>], "", [SDNPMemOperand]>,
1538   AMDGPURsrcIntrinsic<1>;
1539 def int_amdgcn_struct_ptr_buffer_store_format : AMDGPUStructPtrBufferStore;
1540 def int_amdgcn_struct_ptr_buffer_store : AMDGPUStructPtrBufferStore;
1541 
1542 class AMDGPURawBufferAtomic<LLVMType data_ty = llvm_any_ty> : Intrinsic <
1543   [data_ty],
1544   [LLVMMatchType<0>,  // vdata(VGPR)
1545    llvm_v4i32_ty,     // rsrc(SGPR)
1546    llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
1547    llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
1548    llvm_i32_ty],      // cachepolicy(imm; bit 1 = slc, ..., bit 31 = volatile)
1549   [ImmArg<ArgIndex<4>>, IntrWillReturn, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>,
1550   AMDGPURsrcIntrinsic<1, 0>;
1551 def int_amdgcn_raw_buffer_atomic_swap : AMDGPURawBufferAtomic;
1552 def int_amdgcn_raw_buffer_atomic_add : AMDGPURawBufferAtomic;
1553 def int_amdgcn_raw_buffer_atomic_sub : AMDGPURawBufferAtomic;
1554 def int_amdgcn_raw_buffer_atomic_smin : AMDGPURawBufferAtomic;
1555 def int_amdgcn_raw_buffer_atomic_umin : AMDGPURawBufferAtomic;
1556 def int_amdgcn_raw_buffer_atomic_fmin : AMDGPURawBufferAtomic<llvm_anyfloat_ty>;
1557 def int_amdgcn_raw_buffer_atomic_smax : AMDGPURawBufferAtomic;
1558 def int_amdgcn_raw_buffer_atomic_umax : AMDGPURawBufferAtomic;
1559 def int_amdgcn_raw_buffer_atomic_fmax : AMDGPURawBufferAtomic<llvm_anyfloat_ty>;
1560 def int_amdgcn_raw_buffer_atomic_and : AMDGPURawBufferAtomic;
1561 def int_amdgcn_raw_buffer_atomic_or : AMDGPURawBufferAtomic;
1562 def int_amdgcn_raw_buffer_atomic_xor : AMDGPURawBufferAtomic;
1563 def int_amdgcn_raw_buffer_atomic_inc : AMDGPURawBufferAtomic;
1564 def int_amdgcn_raw_buffer_atomic_dec : AMDGPURawBufferAtomic;
1565 def int_amdgcn_raw_buffer_atomic_cond_sub_u32 : AMDGPURawBufferAtomic;
1566 def int_amdgcn_raw_buffer_atomic_cmpswap : Intrinsic<
1567   [llvm_anyint_ty],
1568   [LLVMMatchType<0>,  // src(VGPR)
1569    LLVMMatchType<0>,  // cmp(VGPR)
1570    llvm_v4i32_ty,     // rsrc(SGPR)
1571    llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
1572    llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
1573    llvm_i32_ty],      // cachepolicy(imm; bit 1 = slc, ..., bit 31 = volatile)
1574   [ImmArg<ArgIndex<5>>, IntrWillReturn, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>,
1575   AMDGPURsrcIntrinsic<2, 0>;
1576 
1577 class AMDGPURawPtrBufferAtomic<LLVMType data_ty = llvm_any_ty> : Intrinsic <
1578   [data_ty],
1579   [LLVMMatchType<0>,            // vdata(VGPR)
1580    AMDGPUBufferRsrcTy,          // rsrc(SGPR)
1581    llvm_i32_ty,                 // offset(VGPR/imm, included in bounds checking and swizzling)
1582    llvm_i32_ty,                 // soffset(SGPR/imm, excluded from bounds checking and swizzling)
1583    llvm_i32_ty],                // cachepolicy(imm; bit 1 = slc, ..., bit 31 = volatile)
1584   [IntrArgMemOnly, NoCapture<ArgIndex<1>>,
1585    ImmArg<ArgIndex<4>>, IntrWillReturn, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>,
1586   AMDGPURsrcIntrinsic<1, 0>;
1587 
1588 def int_amdgcn_raw_ptr_buffer_atomic_swap : AMDGPURawPtrBufferAtomic;
1589 def int_amdgcn_raw_ptr_buffer_atomic_add : AMDGPURawPtrBufferAtomic;
1590 def int_amdgcn_raw_ptr_buffer_atomic_sub : AMDGPURawPtrBufferAtomic;
1591 def int_amdgcn_raw_ptr_buffer_atomic_smin : AMDGPURawPtrBufferAtomic;
1592 def int_amdgcn_raw_ptr_buffer_atomic_umin : AMDGPURawPtrBufferAtomic;
1593 def int_amdgcn_raw_ptr_buffer_atomic_fmin : AMDGPURawPtrBufferAtomic<llvm_anyfloat_ty>;
1594 def int_amdgcn_raw_ptr_buffer_atomic_smax : AMDGPURawPtrBufferAtomic;
1595 def int_amdgcn_raw_ptr_buffer_atomic_umax : AMDGPURawPtrBufferAtomic;
1596 def int_amdgcn_raw_ptr_buffer_atomic_fmax : AMDGPURawPtrBufferAtomic<llvm_anyfloat_ty>;
1597 def int_amdgcn_raw_ptr_buffer_atomic_and : AMDGPURawPtrBufferAtomic;
1598 def int_amdgcn_raw_ptr_buffer_atomic_or : AMDGPURawPtrBufferAtomic;
1599 def int_amdgcn_raw_ptr_buffer_atomic_xor : AMDGPURawPtrBufferAtomic;
1600 def int_amdgcn_raw_ptr_buffer_atomic_inc : AMDGPURawPtrBufferAtomic;
1601 def int_amdgcn_raw_ptr_buffer_atomic_dec : AMDGPURawPtrBufferAtomic;
1602 def int_amdgcn_raw_ptr_buffer_atomic_cond_sub_u32 : AMDGPURawPtrBufferAtomic;
1603 def int_amdgcn_raw_ptr_buffer_atomic_cmpswap : Intrinsic<
1604   [llvm_anyint_ty],
1605   [LLVMMatchType<0>,  // src(VGPR)
1606    LLVMMatchType<0>,  // cmp(VGPR)
1607    AMDGPUBufferRsrcTy, // rsrc(SGPR)
1608    llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
1609    llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
1610    llvm_i32_ty],      // cachepolicy(imm; bit 1 = slc, ..., bit 31 = volatile)
1611   [IntrArgMemOnly, NoCapture<ArgIndex<2>>,
1612    ImmArg<ArgIndex<5>>, IntrWillReturn, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>,
1613   AMDGPURsrcIntrinsic<2, 0>;
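// Illustrative IR (editorial addition, operands assumed): a 32-bit buffer atomic add
// and the two-source compare-and-swap; both return the pre-operation memory value.
//   %old = call i32 @llvm.amdgcn.raw.ptr.buffer.atomic.add.i32(
//              i32 %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 0)
//   %was = call i32 @llvm.amdgcn.raw.ptr.buffer.atomic.cmpswap.i32(
//              i32 %new, i32 %cmp, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 0)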
1614 
1615 // gfx908 intrinsic
1616 def int_amdgcn_raw_buffer_atomic_fadd : AMDGPURawBufferAtomic<llvm_anyfloat_ty>;
1617 
1618 // Supports float and <2 x half> on gfx908. Supports v2bf16 on gfx90a, gfx940, gfx950, gfx12+.
1619 def int_amdgcn_raw_ptr_buffer_atomic_fadd : AMDGPURawPtrBufferAtomic<llvm_anyfloat_ty>;
1620 
1621 class AMDGPUStructBufferAtomic<LLVMType data_ty = llvm_any_ty> : Intrinsic <
1622   [data_ty],
1623   [LLVMMatchType<0>,  // vdata(VGPR)
1624    llvm_v4i32_ty,     // rsrc(SGPR)
1625    llvm_i32_ty,       // vindex(VGPR)
1626    llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
1627    llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
1628    llvm_i32_ty],      // cachepolicy(imm; bit 1 = slc, ..., bit 31 = volatile)
1629   [ImmArg<ArgIndex<5>>, IntrWillReturn, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>,
1630   AMDGPURsrcIntrinsic<1, 0>;
1631 def int_amdgcn_struct_buffer_atomic_swap : AMDGPUStructBufferAtomic;
1632 def int_amdgcn_struct_buffer_atomic_add : AMDGPUStructBufferAtomic;
1633 def int_amdgcn_struct_buffer_atomic_sub : AMDGPUStructBufferAtomic;
1634 def int_amdgcn_struct_buffer_atomic_smin : AMDGPUStructBufferAtomic;
1635 def int_amdgcn_struct_buffer_atomic_umin : AMDGPUStructBufferAtomic;
1636 def int_amdgcn_struct_buffer_atomic_smax : AMDGPUStructBufferAtomic;
1637 def int_amdgcn_struct_buffer_atomic_umax : AMDGPUStructBufferAtomic;
1638 def int_amdgcn_struct_buffer_atomic_and : AMDGPUStructBufferAtomic;
1639 def int_amdgcn_struct_buffer_atomic_or : AMDGPUStructBufferAtomic;
1640 def int_amdgcn_struct_buffer_atomic_xor : AMDGPUStructBufferAtomic;
1641 def int_amdgcn_struct_buffer_atomic_inc : AMDGPUStructBufferAtomic;
1642 def int_amdgcn_struct_buffer_atomic_dec : AMDGPUStructBufferAtomic;
1643 def int_amdgcn_struct_buffer_atomic_cond_sub_u32 : AMDGPUStructBufferAtomic;
1644 def int_amdgcn_struct_buffer_atomic_cmpswap : Intrinsic<
1645   [llvm_anyint_ty],
1646   [LLVMMatchType<0>,  // src(VGPR)
1647    LLVMMatchType<0>,  // cmp(VGPR)
1648    llvm_v4i32_ty,     // rsrc(SGPR)
1649    llvm_i32_ty,       // vindex(VGPR)
1650    llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
1651    llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
1652    llvm_i32_ty],      // cachepolicy(imm; bit 1 = slc, ..., bit 31 = volatile)
1653   [ImmArg<ArgIndex<6>>, IntrWillReturn, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>,
1654   AMDGPURsrcIntrinsic<2, 0>;
1655 
1656 class AMDGPUStructPtrBufferAtomic<LLVMType data_ty = llvm_any_ty> : Intrinsic <
1657   [data_ty],
1658   [LLVMMatchType<0>,            // vdata(VGPR)
1659    AMDGPUBufferRsrcTy,          // rsrc(SGPR)
1660    llvm_i32_ty,                 // vindex(VGPR)
1661    llvm_i32_ty,                 // offset(VGPR/imm, included in bounds checking and swizzling)
1662    llvm_i32_ty,                 // soffset(SGPR/imm, excluded from bounds checking and swizzling)
1663    llvm_i32_ty],                // cachepolicy(imm; bit 1 = slc, ..., bit 31 = volatile)
1664   [IntrArgMemOnly, NoCapture<ArgIndex<1>>,
1665    ImmArg<ArgIndex<5>>, IntrWillReturn, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>,
1666   AMDGPURsrcIntrinsic<1, 0>;
1667 def int_amdgcn_struct_ptr_buffer_atomic_swap : AMDGPUStructPtrBufferAtomic;
1668 def int_amdgcn_struct_ptr_buffer_atomic_add : AMDGPUStructPtrBufferAtomic;
1669 def int_amdgcn_struct_ptr_buffer_atomic_sub : AMDGPUStructPtrBufferAtomic;
1670 def int_amdgcn_struct_ptr_buffer_atomic_smin : AMDGPUStructPtrBufferAtomic;
1671 def int_amdgcn_struct_ptr_buffer_atomic_umin : AMDGPUStructPtrBufferAtomic;
1672 def int_amdgcn_struct_ptr_buffer_atomic_smax : AMDGPUStructPtrBufferAtomic;
1673 def int_amdgcn_struct_ptr_buffer_atomic_umax : AMDGPUStructPtrBufferAtomic;
1674 def int_amdgcn_struct_ptr_buffer_atomic_and : AMDGPUStructPtrBufferAtomic;
1675 def int_amdgcn_struct_ptr_buffer_atomic_or : AMDGPUStructPtrBufferAtomic;
1676 def int_amdgcn_struct_ptr_buffer_atomic_xor : AMDGPUStructPtrBufferAtomic;
1677 def int_amdgcn_struct_ptr_buffer_atomic_inc : AMDGPUStructPtrBufferAtomic;
1678 def int_amdgcn_struct_ptr_buffer_atomic_dec : AMDGPUStructPtrBufferAtomic;
1679 def int_amdgcn_struct_ptr_buffer_atomic_cond_sub_u32 : AMDGPUStructPtrBufferAtomic;
1680 def int_amdgcn_struct_ptr_buffer_atomic_cmpswap : Intrinsic<
1681   [llvm_anyint_ty],
1682   [LLVMMatchType<0>,  // src(VGPR)
1683    LLVMMatchType<0>,  // cmp(VGPR)
1684    AMDGPUBufferRsrcTy, // rsrc(SGPR)
1685    llvm_i32_ty,       // vindex(VGPR)
1686    llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
1687    llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
1688    llvm_i32_ty],      // cachepolicy(imm; bit 1 = slc, ..., bit 31 = volatile)
1689   [IntrArgMemOnly, NoCapture<ArgIndex<2>>,
1690    ImmArg<ArgIndex<6>>, IntrWillReturn, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>,
1691   AMDGPURsrcIntrinsic<2, 0>;
1692 
1693 // gfx908 intrinsic. Supports v2bf16 on gfx12+ and gfx950
1694 def int_amdgcn_struct_buffer_atomic_fadd : AMDGPUStructBufferAtomic<llvm_anyfloat_ty>;
1695 def int_amdgcn_struct_ptr_buffer_atomic_fadd : AMDGPUStructPtrBufferAtomic<llvm_anyfloat_ty>;
1696 
1697 // gfx90a intrinsics
1698 def int_amdgcn_struct_buffer_atomic_fmin : AMDGPUStructBufferAtomic<llvm_anyfloat_ty>;
1699 def int_amdgcn_struct_buffer_atomic_fmax : AMDGPUStructBufferAtomic<llvm_anyfloat_ty>;
1700 
1701 def int_amdgcn_struct_ptr_buffer_atomic_fmin : AMDGPUStructPtrBufferAtomic<llvm_anyfloat_ty>;
1702 def int_amdgcn_struct_ptr_buffer_atomic_fmax : AMDGPUStructPtrBufferAtomic<llvm_anyfloat_ty>;
1703 
1704 // tbuffer intrinsics, with:
1705 // - raw and struct variants
1706 // - joint format field
1707 // - joint cachepolicy field
1708 def int_amdgcn_raw_tbuffer_load : DefaultAttrsIntrinsic <
1709     [llvm_any_ty],    // overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
1710     [llvm_v4i32_ty,   // rsrc(SGPR)
1711      llvm_i32_ty,     // offset(VGPR/imm, included in bounds checking and swizzling)
1712      llvm_i32_ty,     // soffset(SGPR/imm, excluded from bounds checking and swizzling)
1713      llvm_i32_ty,     // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
1714      llvm_i32_ty],    // auxiliary/cachepolicy(imm):
1715                       //                bit 0 = glc, bit 1 = slc, bit 2 = dlc (gfx10/gfx11),
1716                       //                bit 3 = swz, bit 4 = scc (gfx90a)
1717                       //        gfx940: bit 0 = sc0, bit 1 = nt, bit 3 = swz, bit 4 = sc1
1718                       //        gfx12+: bits [0-2] = th, bits [3-4] = scope,
1719                       //                bit 6 = swz
1720     [IntrReadMem,
1721      ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>], "", [SDNPMemOperand]>,
1722   AMDGPURsrcIntrinsic<0>;
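// Illustrative IR (editorial addition): a typed-buffer load; the format operand packs
// dfmt in bits 3..0 and nfmt in bits 6..4 as noted above (the value 63 below is purely
// illustrative, not a recommended encoding).
//   %v = call <4 x float> @llvm.amdgcn.raw.tbuffer.load.v4f32(
//            <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 63, i32 0)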
1723 
1724 def int_amdgcn_raw_ptr_tbuffer_load : DefaultAttrsIntrinsic <
1725     [llvm_any_ty],       // overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
1726     [AMDGPUBufferRsrcTy, // rsrc(SGPR)
1727      llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
1728      llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
1729      llvm_i32_ty,       // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
1730      llvm_i32_ty],      // auxiliary/cachepolicy(imm):
1731                         //                bit 0 = glc, bit 1 = slc, bit 2 = dlc (gfx10/gfx11),
1732                         //                bit 3 = swz, bit 4 = scc (gfx90a)
1733                         //        gfx940: bit 0 = sc0, bit 1 = nt, bit 3 = swz, bit 4 = sc1
1734                         //        gfx12+: bits [0-2] = th, bits [3-4] = scope,
1735                         //                bit 6 = swz
1736                         //           all: volatile op (bit 31, stripped at lowering)
1737     [IntrArgMemOnly, IntrReadMem, ReadOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>,
1738      ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>], "", [SDNPMemOperand]>,
1739   AMDGPURsrcIntrinsic<0>;
1740 
1741 def int_amdgcn_raw_tbuffer_store : DefaultAttrsIntrinsic <
1742     [],
1743     [llvm_any_ty,    // vdata(VGPR), overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
1744      llvm_v4i32_ty,  // rsrc(SGPR)
1745      llvm_i32_ty,    // offset(VGPR/imm, included in bounds checking and swizzling)
1746      llvm_i32_ty,    // soffset(SGPR/imm, excluded from bounds checking and swizzling)
1747      llvm_i32_ty,    // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
1748      llvm_i32_ty],   // auxiliary/cachepolicy(imm):
1749                      //                bit 0 = glc, bit 1 = slc, bit 2 = dlc (gfx10/gfx11),
1750                      //                bit 3 = swz, bit 4 = scc (gfx90a)
1751                      //        gfx940: bit 0 = sc0, bit 1 = nt, bit 3 = swz, bit 4 = sc1
1752                      //        gfx12+: bits [0-2] = th, bits [3-4] = scope,
1753                      //                bit 6 = swz
1754                      //           all: volatile op (bit 31, stripped at lowering)
1755     [IntrWriteMem,
1756      ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>], "", [SDNPMemOperand]>,
1757   AMDGPURsrcIntrinsic<1>;
1758 
1759 def int_amdgcn_raw_ptr_tbuffer_store : DefaultAttrsIntrinsic <
1760     [],
1761     [llvm_any_ty,    // vdata(VGPR), overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
1762      AMDGPUBufferRsrcTy, // rsrc(SGPR)
1763      llvm_i32_ty,    // offset(VGPR/imm, included in bounds checking and swizzling)
1764      llvm_i32_ty,    // soffset(SGPR/imm, excluded from bounds checking and swizzling)
1765      llvm_i32_ty,    // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
1766      llvm_i32_ty],   // auxiliary/cachepolicy(imm):
1767                      //                bit 0 = glc, bit 1 = slc, bit 2 = dlc (gfx10/gfx11),
1768                      //                bit 3 = swz, bit 4 = scc (gfx90a)
1769                      //        gfx940: bit 0 = sc0, bit 1 = nt, bit 3 = swz, bit 4 = sc1
1770                      //        gfx12+: bits [0-2] = th, bits [3-4] = scope,
1771                      //                bit 6 = swz
1772                      //           all: volatile op (bit 31, stripped at lowering)
1773     [IntrArgMemOnly, IntrWriteMem, WriteOnly<ArgIndex<1>>, NoCapture<ArgIndex<1>>,
1774      ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>], "", [SDNPMemOperand]>,
1775   AMDGPURsrcIntrinsic<1>;
1776 
1777 def int_amdgcn_struct_tbuffer_load : DefaultAttrsIntrinsic <
1778     [llvm_any_ty],    // overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
1779     [llvm_v4i32_ty,   // rsrc(SGPR)
1780      llvm_i32_ty,     // vindex(VGPR)
1781      llvm_i32_ty,     // offset(VGPR/imm, included in bounds checking and swizzling)
1782      llvm_i32_ty,     // soffset(SGPR/imm, excluded from bounds checking and swizzling)
1783      llvm_i32_ty,     // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
1784      llvm_i32_ty],    // auxiliary/cachepolicy(imm):
1785                       //                bit 0 = glc, bit 1 = slc, bit 2 = dlc (gfx10/gfx11),
1786                       //                bit 3 = swz, bit 4 = scc (gfx90a)
1787                       //        gfx940: bit 0 = sc0, bit 1 = nt, bit 3 = swz, bit 4 = sc1
1788                       //        gfx12+: bits [0-2] = th, bits [3-4] = scope,
1789                       //                bit 6 = swz
1790                       //           all: volatile op (bit 31, stripped at lowering)
1791     [IntrReadMem,
1792      ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>], "", [SDNPMemOperand]>,
1793   AMDGPURsrcIntrinsic<0>;
1794 
1795 def int_amdgcn_struct_ptr_tbuffer_load : DefaultAttrsIntrinsic <
1796     [llvm_any_ty],       // overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
1797     [AMDGPUBufferRsrcTy, // rsrc(SGPR)
1798      llvm_i32_ty,        // vindex(VGPR)
1799      llvm_i32_ty,        // offset(VGPR/imm, included in bounds checking and swizzling)
1800      llvm_i32_ty,        // soffset(SGPR/imm, excluded from bounds checking and swizzling)
1801      llvm_i32_ty,        // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
1802      llvm_i32_ty],       // auxiliary/cachepolicy(imm):
1803                          //                bit 0 = glc, bit 1 = slc, bit 2 = dlc (gfx10/gfx11),
1804                          //                bit 3 = swz, bit 4 = scc (gfx90a)
1805                          //        gfx940: bit 0 = sc0, bit 1 = nt, bit 3 = swz, bit 4 = sc1
1806                          //        gfx12+: bits [0-2] = th, bits [3-4] = scope,
1807                          //                bit 6 = swz
1808                          //           all: volatile op (bit 31, stripped at lowering)
1809     [IntrArgMemOnly, IntrReadMem, ReadOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>,
1810      ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>], "", [SDNPMemOperand]>,
1811   AMDGPURsrcIntrinsic<0>;
1812 
1813 def int_amdgcn_struct_ptr_tbuffer_store : DefaultAttrsIntrinsic <
1814     [],
1815     [llvm_any_ty,        // vdata(VGPR), overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
1816      AMDGPUBufferRsrcTy, // rsrc(SGPR)
1817      llvm_i32_ty,        // vindex(VGPR)
1818      llvm_i32_ty,        // offset(VGPR/imm, included in bounds checking and swizzling)
1819      llvm_i32_ty,        // soffset(SGPR/imm, excluded from bounds checking and swizzling)
1820      llvm_i32_ty,        // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
1821      llvm_i32_ty],       // auxiliary/cachepolicy(imm):
1822                          //                bit 0 = glc, bit 1 = slc, bit 2 = dlc (gfx10/gfx11),
1823                          //                bit 3 = swz, bit 4 = scc (gfx90a)
1824                          //        gfx940: bit 0 = sc0, bit 1 = nt, bit 3 = swz, bit 4 = sc1
1825                          //        gfx12+: bits [0-2] = th, bits [3-4] = scope,
1826                          //                bit 6 = swz
1827                          //           all: volatile op (bit 31, stripped at lowering)
1828     [IntrArgMemOnly, IntrWriteMem, WriteOnly<ArgIndex<1>>, NoCapture<ArgIndex<1>>,
1829      ImmArg<ArgIndex<5>>, ImmArg<ArgIndex<6>>], "", [SDNPMemOperand]>,
1830   AMDGPURsrcIntrinsic<1>;
1831 
1832 def int_amdgcn_struct_tbuffer_store : DefaultAttrsIntrinsic <
1833     [],
1834     [llvm_any_ty,    // vdata(VGPR), overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
1835      llvm_v4i32_ty,  // rsrc(SGPR)
1836      llvm_i32_ty,    // vindex(VGPR)
1837      llvm_i32_ty,    // offset(VGPR/imm, included in bounds checking and swizzling)
1838      llvm_i32_ty,    // soffset(SGPR/imm, excluded from bounds checking and swizzling)
1839      llvm_i32_ty,    // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
1840      llvm_i32_ty],   // auxiliary/cachepolicy(imm):
1841                      //                bit 0 = glc, bit 1 = slc, bit 2 = dlc (gfx10/gfx11),
1842                      //                bit 3 = swz, bit 4 = scc (gfx90a)
1843                      //        gfx940: bit 0 = sc0, bit 1 = nt, bit 3 = swz, bit 4 = sc1
1844                      //        gfx12+: bits [0-2] = th, bits [3-4] = scope,
1845                      //                bit 6 = swz
1846                      //           all: volatile op (bit 31, stripped at lowering)
1847     [IntrWriteMem,
1848      ImmArg<ArgIndex<5>>, ImmArg<ArgIndex<6>>], "", [SDNPMemOperand]>,
1849   AMDGPURsrcIntrinsic<1>;
1850 
1851 class AMDGPURawBufferLoadLDS : Intrinsic <
1852   [],
1853   [llvm_v4i32_ty,             // rsrc(SGPR)
1854    LLVMQualPointerType<3>,    // LDS base offset
1855    llvm_i32_ty,               // Data byte size: 1/2/4 (/12/16 for gfx950)
1856    llvm_i32_ty,               // voffset(VGPR, included in bounds checking and swizzling)
1857    llvm_i32_ty,               // soffset(SGPR/imm, excluded from bounds checking and swizzling)
1858    llvm_i32_ty,               // imm offset(imm, included in bounds checking and swizzling)
1859    llvm_i32_ty],              // auxiliary/cachepolicy(imm):
1860                               //                bit 0 = glc, bit 1 = slc, bit 2 = dlc (gfx10/gfx11),
1861                               //                bit 3 = swz, bit 4 = scc (gfx90a)
1862                               //        gfx940: bit 0 = sc0, bit 1 = nt, bit 3 = swz, bit 4 = sc1
1863                               //        gfx12+: bits [0-2] = th, bits [3-4] = scope,
1864                               //                bit 6 = swz
1865                               //           all: volatile op (bit 31, stripped at lowering)
1866   [IntrWillReturn, NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<5>>,
1867    ImmArg<ArgIndex<6>>, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>, AMDGPURsrcIntrinsic<0>;
1868 def int_amdgcn_raw_buffer_load_lds : AMDGPURawBufferLoadLDS;
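// Illustrative IR (editorial addition, operands assumed): copy 4 bytes from the buffer
// straight into LDS at %lds.ptr; the data never passes through VGPRs.
//   call void @llvm.amdgcn.raw.buffer.load.lds(
//       <4 x i32> %rsrc, ptr addrspace(3) %lds.ptr, i32 4,
//       i32 %voffset, i32 %soffset, i32 0, i32 0)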
1869 
1870 class AMDGPURawPtrBufferLoadLDS : Intrinsic <
1871   [],
1872   [AMDGPUBufferRsrcTy,        // rsrc(SGPR)
1873    LLVMQualPointerType<3>,    // LDS base offset
1874    llvm_i32_ty,               // Data byte size: 1/2/4 (/12/16 for gfx950)
1875    llvm_i32_ty,               // voffset(VGPR, included in bounds checking and swizzling)
1876    llvm_i32_ty,               // soffset(SGPR/imm, excluded from bounds checking and swizzling)
1877    llvm_i32_ty,               // imm offset(imm, included in bounds checking and swizzling)
1878    llvm_i32_ty],              // auxiliary/cachepolicy(imm):
1879                               //                bit 0 = glc, bit 1 = slc, bit 2 = dlc (gfx10/gfx11),
1880                               //                bit 3 = swz, bit 4 = scc (gfx90a)
1881                               //        gfx940: bit 0 = sc0, bit 1 = nt, bit 3 = swz, bit 4 = sc1
1882                               //        gfx12+: bits [0-2] = th, bits [3-4] = scope,
1883                               //                bit 6 = swz
1884                               //           all: volatile op (bit 31, stripped at lowering)
1885   [IntrWillReturn, IntrArgMemOnly,
1886    ReadOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>,
1887    WriteOnly<ArgIndex<1>>, NoCapture<ArgIndex<1>>,
1888    ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<5>>,
1889    ImmArg<ArgIndex<6>>, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>, AMDGPURsrcIntrinsic<0>;
1890 def int_amdgcn_raw_ptr_buffer_load_lds : AMDGPURawPtrBufferLoadLDS;
1891 
1892 class AMDGPUStructBufferLoadLDS : Intrinsic <
1893   [],
1894   [llvm_v4i32_ty,             // rsrc(SGPR)
1895    LLVMQualPointerType<3>,    // LDS base offset
1896    llvm_i32_ty,               // Data byte size: 1/2/4 (/12/16 for gfx950)
1897    llvm_i32_ty,               // vindex(VGPR)
1898    llvm_i32_ty,               // voffset(VGPR, included in bounds checking and swizzling)
1899    llvm_i32_ty,               // soffset(SGPR/imm, excluded from bounds checking and swizzling)
1900    llvm_i32_ty,               // imm offset(imm, included in bounds checking and swizzling)
1901    llvm_i32_ty],              // auxiliary/cachepolicy(imm):
1902                               //                bit 0 = glc, bit 1 = slc, bit 2 = dlc (gfx10/gfx11),
1903                               //                bit 3 = swz, bit 4 = scc (gfx90a)
1904                               //        gfx940: bit 0 = sc0, bit 1 = nt, bit 3 = swz, bit 4 = sc1
1905                               //        gfx12+: bits [0-2] = th, bits [3-4] = scope,
1906                               //                bit 6 = swz
1907                               //           all: volatile op (bit 31, stripped at lowering)
1908   [IntrWillReturn, NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<6>>,
1909    ImmArg<ArgIndex<7>>, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>, AMDGPURsrcIntrinsic<0>;
1910 def int_amdgcn_struct_buffer_load_lds : AMDGPUStructBufferLoadLDS;
1911 
1912 class AMDGPUStructPtrBufferLoadLDS : Intrinsic <
1913   [],
1914   [AMDGPUBufferRsrcTy,        // rsrc(SGPR)
1915    LLVMQualPointerType<3>,    // LDS base offset
1916    llvm_i32_ty,               // Data byte size: 1/2/4 (/12/16 for gfx950)
1917    llvm_i32_ty,               // vindex(VGPR)
1918    llvm_i32_ty,               // voffset(VGPR, included in bounds checking and swizzling)
1919    llvm_i32_ty,               // soffset(SGPR/imm, excluded from bounds checking and swizzling)
1920    llvm_i32_ty,               // imm offset(imm, included in bounds checking and swizzling)
1921    llvm_i32_ty],              // auxiliary/cachepolicy(imm):
1922                               //                bit 0 = glc, bit 1 = slc, bit 2 = dlc (gfx10/gfx11),
1923                               //                bit 3 = swz, bit 4 = scc (gfx90a)
1924                               //        gfx940: bit 0 = sc0, bit 1 = nt, bit 3 = swz, bit 4 = sc1
1925                               //        gfx12+: bits [0-2] = th, bits [3-4] = scope,
1926                               //                bit 6 = swz
1927                               //           all: volatile op (bit 31, stripped at lowering)
1928   [IntrWillReturn, IntrArgMemOnly,
1929    ReadOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>,
1930    WriteOnly<ArgIndex<1>>, NoCapture<ArgIndex<1>>,
1931    ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<6>>,
1932    ImmArg<ArgIndex<7>>, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>, AMDGPURsrcIntrinsic<0>;
1933 def int_amdgcn_struct_ptr_buffer_load_lds : AMDGPUStructPtrBufferLoadLDS;
1934 
1935 def int_amdgcn_s_buffer_prefetch_data : DefaultAttrsIntrinsic <
1936   [],
1937   [AMDGPUBufferRsrcTy, // rsrc(SGPR)
1938    llvm_i32_ty,        // offset (imm)
1939    llvm_i32_ty],       // len (SGPR/imm)
1940   [IntrInaccessibleMemOrArgMemOnly, ImmArg<ArgIndex<1>>], "", [SDNPMemOperand]>,
1941   AMDGPURsrcIntrinsic<0>,
1942   ClangBuiltin<"__builtin_amdgcn_s_buffer_prefetch_data">;
1943 
1944 } // defset AMDGPUBufferIntrinsics
1945 
1946 // Uses that do not set the done bit should set IntrWriteMem on the
1947 // call site.
1948 def int_amdgcn_exp : DefaultAttrsIntrinsic <[], [
1949   llvm_i32_ty,       // tgt,
1950   llvm_i32_ty,       // en
1951   llvm_any_ty,       // src0 (f32 or i32)
1952   LLVMMatchType<0>,  // src1
1953   LLVMMatchType<0>,  // src2
1954   LLVMMatchType<0>,  // src3
1955   llvm_i1_ty,        // done
1956   llvm_i1_ty         // vm (ignored on GFX11+)
1957   ],
1958   [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<6>>,
1959    ImmArg<ArgIndex<7>>, IntrWriteMem, IntrInaccessibleMemOnly]
1960 >;
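// Illustrative IR (editorial addition, operands assumed): export a pixel colour to
// MRT0 with the done bit set on the shader's final export.
//   call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %r, float %g, float %b,
//                                  float %a, i1 true, i1 false)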
1961 
1962 // exp with row_en bit set. Only supported on GFX11+.
1963 def int_amdgcn_exp_row : DefaultAttrsIntrinsic <[], [
1964   llvm_i32_ty,       // tgt,
1965   llvm_i32_ty,       // en
1966   llvm_any_ty,       // src0 (f32 or i32)
1967   LLVMMatchType<0>,  // src1
1968   LLVMMatchType<0>,  // src2
1969   LLVMMatchType<0>,  // src3
1970   llvm_i1_ty,        // done
1971   llvm_i32_ty],      // row number
1972   [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<6>>,
1973    IntrWriteMem, IntrInaccessibleMemOnly]
1974 >;
1975 
1976 // exp with compr bit set. Not supported on GFX11+.
1977 def int_amdgcn_exp_compr : DefaultAttrsIntrinsic <[], [
1978   llvm_i32_ty,       // tgt,
1979   llvm_i32_ty,       // en
1980   llvm_anyvector_ty, // src0 (v2f16 or v2i16)
1981   LLVMMatchType<0>,  // src1
1982   llvm_i1_ty,        // done
1983   llvm_i1_ty],       // vm
1984   [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<4>>,
1985    ImmArg<ArgIndex<5>>, IntrWriteMem, IntrInaccessibleMemOnly]
1986 >;
1987 
1988 def int_amdgcn_buffer_wbinvl1_sc :
1989   ClangBuiltin<"__builtin_amdgcn_buffer_wbinvl1_sc">,
1990   DefaultAttrsIntrinsic<[], [], [IntrNoMem, IntrHasSideEffects]>;
1991 
1992 def int_amdgcn_buffer_wbinvl1 :
1993   ClangBuiltin<"__builtin_amdgcn_buffer_wbinvl1">,
1994   DefaultAttrsIntrinsic<[], [], [IntrNoMem, IntrHasSideEffects]>;
1995 
1996 def int_amdgcn_s_dcache_inv :
1997   ClangBuiltin<"__builtin_amdgcn_s_dcache_inv">,
1998   DefaultAttrsIntrinsic<[], [], [IntrNoMem, IntrHasSideEffects]>;
1999 
2000 def int_amdgcn_s_memtime :
2001   ClangBuiltin<"__builtin_amdgcn_s_memtime">,
2002   DefaultAttrsIntrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrHasSideEffects]>;
2003 
2004 def int_amdgcn_s_sleep :
2005   ClangBuiltin<"__builtin_amdgcn_s_sleep">,
2006   DefaultAttrsIntrinsic<[], [llvm_i32_ty], [ImmArg<ArgIndex<0>>, IntrNoMem,
2007                                 IntrHasSideEffects]> {
2008 }
2009 
2010 def int_amdgcn_s_sleep_var
2011     : ClangBuiltin<"__builtin_amdgcn_s_sleep_var">,
2012       Intrinsic<[], [llvm_i32_ty],
2013                 [IntrNoMem, IntrHasSideEffects, IntrWillReturn]> {
2014 }
2015 
2016 def int_amdgcn_s_nop :
2017   DefaultAttrsIntrinsic<[], [llvm_i16_ty], [ImmArg<ArgIndex<0>>, IntrNoMem,
2018                                 IntrHasSideEffects]> {
2019 }
2020 
2021 def int_amdgcn_s_incperflevel :
2022   ClangBuiltin<"__builtin_amdgcn_s_incperflevel">,
2023   DefaultAttrsIntrinsic<[], [llvm_i32_ty], [ImmArg<ArgIndex<0>>, IntrNoMem,
2024                                 IntrHasSideEffects]> {
2025 }
2026 
2027 def int_amdgcn_s_decperflevel :
2028   ClangBuiltin<"__builtin_amdgcn_s_decperflevel">,
2029   DefaultAttrsIntrinsic<[], [llvm_i32_ty], [ImmArg<ArgIndex<0>>, IntrNoMem,
2030                                 IntrHasSideEffects]> {
2031 }
2032 
2033 def int_amdgcn_s_sethalt :
2034   DefaultAttrsIntrinsic<[], [llvm_i32_ty], [ImmArg<ArgIndex<0>>, IntrNoMem,
2035                                 IntrHasSideEffects]>;
2036 
2037 def int_amdgcn_s_setprio :
2038   ClangBuiltin<"__builtin_amdgcn_s_setprio">,
2039   DefaultAttrsIntrinsic<[], [llvm_i16_ty], [ImmArg<ArgIndex<0>>, IntrNoMem,
2040                                 IntrHasSideEffects]>;
2041 
2042 def int_amdgcn_s_ttracedata :
2043   ClangBuiltin<"__builtin_amdgcn_s_ttracedata">,
2044   DefaultAttrsIntrinsic<[], [llvm_i32_ty],
2045                         [IntrNoMem, IntrHasSideEffects]>;
2046 
2047 def int_amdgcn_s_ttracedata_imm :
2048   ClangBuiltin<"__builtin_amdgcn_s_ttracedata_imm">,
2049   DefaultAttrsIntrinsic<[], [llvm_i16_ty],
2050                         [IntrNoMem, IntrHasSideEffects, ImmArg<ArgIndex<0>>]>;
2051 
2052 // This is IntrHasSideEffects so it can be used to read cycle counters.
2053 def int_amdgcn_s_getreg :
2054   ClangBuiltin<"__builtin_amdgcn_s_getreg">,
2055   DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty],
2056   [IntrNoMem, IntrHasSideEffects, ImmArg<ArgIndex<0>>]
2057 >;
2058 
2059 // Note this can be used to set FP environment properties that are
2060 // unsafe to change in non-strictfp functions. The register properties
2061 // available (and value required to access them) may differ per
2062 // subtarget. llvm.amdgcn.s.setreg(hwmode, value)
2063 def int_amdgcn_s_setreg :
2064   ClangBuiltin<"__builtin_amdgcn_s_setreg">,
2065   DefaultAttrsIntrinsic<[], [llvm_i32_ty, llvm_i32_ty],
2066   [IntrNoMem, IntrHasSideEffects, ImmArg<ArgIndex<0>>]
2067 >;
2068 
2069 // int_amdgcn_s_getpc is provided to allow a specific style of position
2070 // independent code to determine the high part of its address when it is
2071 // known (through convention) that the code and any data of interest do
2072 // not cross a 4GiB address boundary. Using it for any other purpose may
2073 // not produce the desired results, since optimizations may move code,
2074 // especially because IntrNoMem is used deliberately to enable them.
2075 // This intrinsic always returns the PC sign-extended from 48 bits, even if
2076 // the s_getpc_b64 instruction returns a zero-extended value.
2077 def int_amdgcn_s_getpc :
2078   ClangBuiltin<"__builtin_amdgcn_s_getpc">,
2079   DefaultAttrsIntrinsic<[llvm_i64_ty], [], [NoUndef<RetIndex>, IntrNoMem,
2080                                 IntrSpeculatable, IntrWillReturn]>;
2081 
2082 // __builtin_amdgcn_interp_mov <param>, <attr_chan>, <attr>, <m0>
2083 // param values: 0 = P10, 1 = P20, 2 = P0
2084 def int_amdgcn_interp_mov :
2085   ClangBuiltin<"__builtin_amdgcn_interp_mov">,
2086   DefaultAttrsIntrinsic<[llvm_float_ty],
2087             [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
2088             [IntrNoMem, IntrSpeculatable,
2089               ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;
2090 
2091 // __builtin_amdgcn_interp_p1 <i>, <attr_chan>, <attr>, <m0>
2092 // This intrinsic reads from lds, but the memory values are constant,
2093 // so it behaves like IntrNoMem.
2094 def int_amdgcn_interp_p1 :
2095   ClangBuiltin<"__builtin_amdgcn_interp_p1">,
2096   DefaultAttrsIntrinsic<[llvm_float_ty],
2097             [llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
2098             [IntrNoMem, IntrSpeculatable,
2099              ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;
2100 
2101 // __builtin_amdgcn_interp_p2 <p1>, <j>, <attr_chan>, <attr>, <m0>
2102 def int_amdgcn_interp_p2 :
2103   ClangBuiltin<"__builtin_amdgcn_interp_p2">,
2104   DefaultAttrsIntrinsic<[llvm_float_ty],
2105             [llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
2106             [IntrNoMem, IntrSpeculatable,
2107              ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;
2108 // See int_amdgcn_interp_p1 above for why this is IntrNoMem.
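// For illustration, a pair of calls interpolating channel 0 of attribute 0
// might look like (the attr_chan/attr values are only an example):
//   %p1 = call float @llvm.amdgcn.interp.p1(float %i, i32 0, i32 0, i32 %m0)
//   %v  = call float @llvm.amdgcn.interp.p2(float %p1, float %j, i32 0, i32 0, i32 %m0)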
2109 
2110 // __builtin_amdgcn_interp_p1_f16 <i>, <attr_chan>, <attr>, <high>, <m0>
2111 // high selects whether high or low 16-bits are loaded from LDS
2112 def int_amdgcn_interp_p1_f16 :
2113   ClangBuiltin<"__builtin_amdgcn_interp_p1_f16">,
2114   DefaultAttrsIntrinsic<[llvm_float_ty],
2115             [llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i1_ty, llvm_i32_ty],
2116             [IntrNoMem, IntrSpeculatable,
2117              ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;
2118 
2119 // __builtin_amdgcn_interp_p2_f16 <p1>, <j>, <attr_chan>, <attr>, <high>, <m0>
2120 // high selects whether high or low 16-bits are loaded from LDS
2121 def int_amdgcn_interp_p2_f16 :
2122   ClangBuiltin<"__builtin_amdgcn_interp_p2_f16">,
2123   DefaultAttrsIntrinsic<[llvm_half_ty],
2124             [llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i1_ty, llvm_i32_ty],
2125             [IntrNoMem, IntrSpeculatable,
2126              ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
2127 
2128 // llvm.amdgcn.lds.direct.load <m0>
2129 // The input argument is m0, which contains a packed combination of address
2130 // offset and flags describing the data type.
2131 def int_amdgcn_lds_direct_load :
2132   DefaultAttrsIntrinsic<[llvm_any_ty], // overloaded for types u8, u16, i32/f32, i8, i16
2133             [llvm_i32_ty],
2134             [IntrReadMem, IntrSpeculatable]>;
2135 
2136 // llvm.amdgcn.lds.param.load <attr_chan>, <attr>, <m0>
2137 // Like interp intrinsics, this reads from lds, but the memory values are constant,
2138 // so it behaves like IntrNoMem.
2139 def int_amdgcn_lds_param_load :
2140   DefaultAttrsIntrinsic<[llvm_float_ty],
2141             [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
2142             [IntrNoMem, IntrSpeculatable,
2143              ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>]>;
2144 
2145 // llvm.amdgcn.interp.inreg.p10 <p>, <i>, <p0>
2146 def int_amdgcn_interp_inreg_p10 :
2147   DefaultAttrsIntrinsic<[llvm_float_ty],
2148             [llvm_float_ty, llvm_float_ty, llvm_float_ty],
2149             [IntrNoMem, IntrSpeculatable]>;
2150 
2151 // llvm.amdgcn.interp.inreg.p2 <p>, <j>, <tmp>
2152 def int_amdgcn_interp_inreg_p2 :
2153   DefaultAttrsIntrinsic<[llvm_float_ty],
2154             [llvm_float_ty, llvm_float_ty, llvm_float_ty],
2155             [IntrNoMem, IntrSpeculatable]>;
2156 
2157 // llvm.amdgcn.interp.inreg.p10.f16 <p>, <i>, <p0>, <high>
2158 // high selects whether high or low 16-bits are used for p and p0 operands
2159 def int_amdgcn_interp_inreg_p10_f16:
2160   DefaultAttrsIntrinsic<[llvm_float_ty],
2161             [llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_i1_ty],
2162             [IntrNoMem, IntrSpeculatable,
2163              ImmArg<ArgIndex<3>>]>;
2164 
2165 // llvm.amdgcn.interp.inreg.p2.f16 <p>, <j>, <tmp>, <high>
2166 // high selects whether high or low 16-bits are used for p operand
2167 def int_amdgcn_interp_inreg_p2_f16 :
2168   DefaultAttrsIntrinsic<[llvm_half_ty],
2169             [llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_i1_ty],
2170             [IntrNoMem, IntrSpeculatable,
2171              ImmArg<ArgIndex<3>>]>;
2172 
2173 // llvm.amdgcn.interp.p10.rtz.f16 <p>, <i>, <p0>, <high>
2174 // gfx11+ fp16 interpolation intrinsic, with round-toward-zero rounding mode.
2175 // high selects whether high or low 16-bits are used for p and p0 operands
2176 def int_amdgcn_interp_p10_rtz_f16:
2177   DefaultAttrsIntrinsic<[llvm_float_ty],
2178             [llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_i1_ty],
2179             [IntrNoMem, IntrSpeculatable,
2180              ImmArg<ArgIndex<3>>]>;
2181 
2182 // llvm.amdgcn.interp.p2.rtz.f16 <p>, <j>, <tmp>, <high>
2183 // gfx11+ fp16 interpolation intrinsic, with round-toward-zero rounding mode.
2184 // high selects whether high or low 16-bits are used for p operand
2185 def int_amdgcn_interp_p2_rtz_f16 :
2186   DefaultAttrsIntrinsic<[llvm_half_ty],
2187             [llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_i1_ty],
2188             [IntrNoMem, IntrSpeculatable,
2189              ImmArg<ArgIndex<3>>]>;
2190 
2191 // Deprecated: use llvm.amdgcn.live.mask instead.
2192 def int_amdgcn_ps_live : DefaultAttrsIntrinsic <
2193   [llvm_i1_ty],
2194   [],
2195   [IntrNoMem]>;
2196 
2197 // Query currently live lanes.
2198 // Returns true if the lane is live (and not a helper lane).
2199 def int_amdgcn_live_mask : DefaultAttrsIntrinsic <[llvm_i1_ty],
2200   [], [NoUndef<RetIndex>, IntrReadMem, IntrInaccessibleMemOnly]
2201 >;
2202 
2203 def int_amdgcn_mbcnt_lo :
2204   ClangBuiltin<"__builtin_amdgcn_mbcnt_lo">,
2205   DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
2206    [IntrNoMem]>;
2207 
2208 def int_amdgcn_mbcnt_hi :
2209   ClangBuiltin<"__builtin_amdgcn_mbcnt_hi">,
2210   DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
2211             [IntrNoMem]>;
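// A common pattern (shown here for illustration) computes the 0-based lane
// index of the current thread within its wave:
//   %lo   = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
//   %lane = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %lo)
// On wave32 targets the mbcnt.lo result alone already gives the lane index.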
2212 
2213 // llvm.amdgcn.ds.swizzle src offset
2214 def int_amdgcn_ds_swizzle :
2215   ClangBuiltin<"__builtin_amdgcn_ds_swizzle">,
2216   Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
2217             [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree,
2218              ImmArg<ArgIndex<1>>]>;
2219 
2220 def int_amdgcn_ubfe : DefaultAttrsIntrinsic<[llvm_anyint_ty],
2221     [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty],
2222     [IntrNoMem, IntrSpeculatable]
2223 >;
2224 
2225 def int_amdgcn_sbfe : DefaultAttrsIntrinsic<[llvm_anyint_ty],
2226     [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty],
2227     [IntrNoMem, IntrSpeculatable]
2228 >;
2229 
2230 def int_amdgcn_lerp :
2231   ClangBuiltin<"__builtin_amdgcn_lerp">,
2232   DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
2233   [IntrNoMem, IntrSpeculatable]
2234 >;
2235 
2236 def int_amdgcn_sad_u8 :
2237   ClangBuiltin<"__builtin_amdgcn_sad_u8">,
2238   DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
2239   [IntrNoMem, IntrSpeculatable]
2240 >;
2241 
2242 def int_amdgcn_msad_u8 :
2243   ClangBuiltin<"__builtin_amdgcn_msad_u8">,
2244   DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
2245   [IntrNoMem, IntrSpeculatable]
2246 >;
2247 
2248 def int_amdgcn_sad_hi_u8 :
2249   ClangBuiltin<"__builtin_amdgcn_sad_hi_u8">,
2250   DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
2251   [IntrNoMem, IntrSpeculatable]
2252 >;
2253 
2254 def int_amdgcn_sad_u16 :
2255   ClangBuiltin<"__builtin_amdgcn_sad_u16">,
2256   DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
2257   [IntrNoMem, IntrSpeculatable]
2258 >;
2259 
2260 def int_amdgcn_qsad_pk_u16_u8 :
2261   ClangBuiltin<"__builtin_amdgcn_qsad_pk_u16_u8">,
2262   DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty],
2263   [IntrNoMem, IntrSpeculatable]
2264 >;
2265 
2266 def int_amdgcn_mqsad_pk_u16_u8 :
2267   ClangBuiltin<"__builtin_amdgcn_mqsad_pk_u16_u8">,
2268   DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty],
2269   [IntrNoMem, IntrSpeculatable]
2270 >;
2271 
2272 def int_amdgcn_mqsad_u32_u8 :
2273   ClangBuiltin<"__builtin_amdgcn_mqsad_u32_u8">,
2274   DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_i64_ty, llvm_i32_ty, llvm_v4i32_ty],
2275   [IntrNoMem, IntrSpeculatable]
2276 >;
2277 
2278 def int_amdgcn_cvt_pk_u8_f32 :
2279   ClangBuiltin<"__builtin_amdgcn_cvt_pk_u8_f32">,
2280   DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_float_ty, llvm_i32_ty, llvm_i32_ty],
2281   [IntrNoMem, IntrSpeculatable]
2282 >;
2283 
2284 def int_amdgcn_icmp :
2285   Intrinsic<[llvm_anyint_ty], [llvm_anyint_ty, LLVMMatchType<1>, llvm_i32_ty],
2286             [IntrNoMem, IntrConvergent,
2287              ImmArg<ArgIndex<2>>, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
2288 
2289 def int_amdgcn_fcmp :
2290   Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, LLVMMatchType<1>, llvm_i32_ty],
2291             [IntrNoMem, IntrConvergent,
2292              ImmArg<ArgIndex<2>>, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
2293 
2294 // Returns a bitfield (i32 or i64) containing the result of its i1 argument
2295 // in all active lanes, and zero in all inactive lanes.
2296 def int_amdgcn_ballot :
2297   Intrinsic<[llvm_anyint_ty], [llvm_i1_ty],
2298             [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
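// For illustration (the return type is chosen to match the wave size):
//   %mask64 = call i64 @llvm.amdgcn.ballot.i64(i1 %cond)   ; wave64
//   %mask32 = call i32 @llvm.amdgcn.ballot.i32(i1 %cond)   ; wave32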
2299 
2300 def int_amdgcn_inverse_ballot :
2301   Intrinsic<[llvm_i1_ty], [llvm_anyint_ty],
2302             [IntrNoMem, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
2303 
2304 // Lowers to S_BITREPLICATE_B64_B32.
2305 // The argument must be uniform; otherwise, the result is undefined.
2306 def int_amdgcn_s_bitreplicate :
2307   DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i32_ty], [IntrNoMem, IntrConvergent]>;
2308 
2309 // Lowers to S_QUADMASK_B{32,64}
2310 // The argument must be uniform; otherwise, the result is undefined.
2311 def int_amdgcn_s_quadmask :
2312   DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_anyint_ty], [IntrNoMem, IntrConvergent]>;
2313 
2314 // Lowers to S_WQM_B{32,64}
2315 // The argument must be uniform; otherwise, the result is undefined.
2316 // Does not set WQM; merely calculates the bitmask.
2317 def int_amdgcn_s_wqm :
2318   DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_anyint_ty], [IntrNoMem, IntrConvergent]>;
2319 
2320 class AMDGPUWaveReduce<LLVMType data_ty = llvm_anyint_ty> : Intrinsic<
2321     [data_ty],
2322     [
2323       LLVMMatchType<0>,   // llvm value to reduce (SGPR/VGPR)
2324       llvm_i32_ty         // Reduction strategy switch for lowering (0: default,
2325                           //                                         1: iterative,
2326                           //                                         2: DPP)
2327     ],
2328     [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree, ImmArg<ArgIndex<1>>]>;
2329 
2330 def int_amdgcn_wave_reduce_umin : AMDGPUWaveReduce;
2331 def int_amdgcn_wave_reduce_umax : AMDGPUWaveReduce;
2332 
2333 def int_amdgcn_readfirstlane :
2334   Intrinsic<[llvm_any_ty], [LLVMMatchType<0>],
2335             [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
2336 
2337 // The lane argument must be uniform across the currently active threads of the
2338 // current wave. Otherwise, the result is undefined.
2339 def int_amdgcn_readlane :
2340   Intrinsic<[llvm_any_ty], [LLVMMatchType<0>, llvm_i32_ty],
2341             [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
2342 
2343 // The value to write and lane select arguments must be uniform across the
2344 // currently active threads of the current wave. Otherwise, the result is
2345 // undefined.
2346 def int_amdgcn_writelane :
2347   Intrinsic<[llvm_any_ty], [
2348     LLVMMatchType<0>,   // uniform value to write: returned by the selected lane
2349     llvm_i32_ty,        // uniform lane select
2350     LLVMMatchType<0>    // returned by all lanes other than the selected one
2351   ],
2352   [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]
2353 >;
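// For illustration (i32 overloads shown; other types follow the same pattern):
//   %u = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %v)
//   %r = call i32 @llvm.amdgcn.readlane.i32(i32 %v, i32 %lane)
//   %w = call i32 @llvm.amdgcn.writelane.i32(i32 %newval, i32 %lane, i32 %oldval)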
2354 
2355 def int_amdgcn_alignbyte : ClangBuiltin<"__builtin_amdgcn_alignbyte">,
2356   DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
2357   [IntrNoMem, IntrSpeculatable]
2358 >;
2359 
2360 // mul24 intrinsics can return i32 or i64.
2361 // When returning i64, they're lowered to a mul24/mulhi24 pair.
2362 def int_amdgcn_mul_i24 : DefaultAttrsIntrinsic<[llvm_anyint_ty],
2363   [llvm_i32_ty, llvm_i32_ty],
2364   [IntrNoMem, IntrSpeculatable]
2365 >;
2366 
2367 def int_amdgcn_mul_u24 : DefaultAttrsIntrinsic<[llvm_anyint_ty],
2368   [llvm_i32_ty, llvm_i32_ty],
2369   [IntrNoMem, IntrSpeculatable]
2370 >;
2371 
2372 def int_amdgcn_mulhi_i24 : DefaultAttrsIntrinsic<[llvm_i32_ty],
2373   [llvm_i32_ty, llvm_i32_ty],
2374   [IntrNoMem, IntrSpeculatable]
2375 >;
2376 
2377 def int_amdgcn_mulhi_u24 : DefaultAttrsIntrinsic<[llvm_i32_ty],
2378   [llvm_i32_ty, llvm_i32_ty],
2379   [IntrNoMem, IntrSpeculatable]
2380 >;
2381 
2382 // llvm.amdgcn.ds.gws.init(i32 bar_val, i32 resource_id)
2383 //
2384 // bar_val is the total number of waves that will wait on this
2385 // barrier, minus 1.
2386 def int_amdgcn_ds_gws_init :
2387   ClangBuiltin<"__builtin_amdgcn_ds_gws_init">,
2388   Intrinsic<[],
2389   [llvm_i32_ty, llvm_i32_ty],
2390   [IntrConvergent, IntrWriteMem,
2391    IntrInaccessibleMemOnly, IntrWillReturn, IntrNoCallback, IntrNoFree], "",
2392   [SDNPMemOperand]
2393 >;
2394 
2395 // llvm.amdgcn.ds.gws.barrier(i32 bar_val, i32 resource_id)
2396 // bar_val is the total number of waves that will wait on this
2397 // barrier, minus 1.
2398 def int_amdgcn_ds_gws_barrier :
2399   ClangBuiltin<"__builtin_amdgcn_ds_gws_barrier">,
2400   Intrinsic<[],
2401   [llvm_i32_ty, llvm_i32_ty],
2402   [IntrConvergent, IntrInaccessibleMemOnly, IntrWillReturn, IntrNoCallback, IntrNoFree], "",
2403   [SDNPMemOperand]
2404 >;
2405 
2406 // llvm.amdgcn.ds.gws.sema.v(i32 resource_id)
2407 def int_amdgcn_ds_gws_sema_v :
2408   ClangBuiltin<"__builtin_amdgcn_ds_gws_sema_v">,
2409   Intrinsic<[],
2410   [llvm_i32_ty],
2411   [IntrConvergent, IntrInaccessibleMemOnly, IntrWillReturn, IntrNoCallback, IntrNoFree], "",
2412   [SDNPMemOperand]
2413 >;
2414 
2415 // llvm.amdgcn.ds.gws.sema.br(i32 vsrc, i32 resource_id)
2416 def int_amdgcn_ds_gws_sema_br :
2417   ClangBuiltin<"__builtin_amdgcn_ds_gws_sema_br">,
2418   Intrinsic<[],
2419   [llvm_i32_ty, llvm_i32_ty],
2420   [IntrConvergent, IntrInaccessibleMemOnly, IntrWillReturn, IntrNoCallback, IntrNoFree], "",
2421   [SDNPMemOperand]
2422 >;
2423 
2424 // llvm.amdgcn.ds.gws.sema.p(i32 resource_id)
2425 def int_amdgcn_ds_gws_sema_p :
2426   ClangBuiltin<"__builtin_amdgcn_ds_gws_sema_p">,
2427   Intrinsic<[],
2428   [llvm_i32_ty],
2429   [IntrConvergent, IntrInaccessibleMemOnly, IntrWillReturn, IntrNoCallback, IntrNoFree], "",
2430   [SDNPMemOperand]
2431 >;
2432 
2433 // llvm.amdgcn.ds.gws.sema.release.all(i32 resource_id)
2434 def int_amdgcn_ds_gws_sema_release_all :
2435   ClangBuiltin<"__builtin_amdgcn_ds_gws_sema_release_all">,
2436   Intrinsic<[],
2437   [llvm_i32_ty],
2438   [IntrConvergent, IntrInaccessibleMemOnly, IntrWillReturn, IntrNoCallback, IntrNoFree], "",
2439   [SDNPMemOperand]
2440 >;
2441 
2442 
2443 // Copies the source value to the destination value, with the guarantee that
2444 // the source value is computed as if the entire program were executed in WQM.
2445 def int_amdgcn_wqm : Intrinsic<[llvm_any_ty],
2446   [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrWillReturn, IntrNoCallback, IntrNoFree]
2447 >;
2448 
2449 // Copies the source value to the destination value, such that the source
2450 // is computed as if the entire program were executed in WQM if any other
2451 // program code executes in WQM.
2452 def int_amdgcn_softwqm : Intrinsic<[llvm_any_ty],
2453   [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrWillReturn, IntrNoCallback, IntrNoFree]
2454 >;
2455 
2456 // Return true if at least one thread within the pixel quad passes true into
2457 // the function.
2458 def int_amdgcn_wqm_vote : Intrinsic<[llvm_i1_ty],
2459   [llvm_i1_ty], [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]
2460 >;
2461 
2462 // If false, set EXEC=0 for the current thread until the end of program.
2463 // FIXME: Should this be IntrNoMem, IntrHasSideEffects, or IntrWillReturn?
2464 def int_amdgcn_kill : Intrinsic<[], [llvm_i1_ty], [IntrNoCallback, IntrNoFree]>;
2465 
2466 def int_amdgcn_endpgm : ClangBuiltin<"__builtin_amdgcn_endpgm">,
2467   Intrinsic<[], [], [IntrNoReturn, IntrCold, IntrNoMem, IntrHasSideEffects, IntrConvergent,
2468                      IntrNoCallback, IntrNoFree]
2469 >;
2470 
2471 // If false, mark all active lanes as helper lanes until the end of program.
2472 def int_amdgcn_wqm_demote : Intrinsic<[],
2473   [llvm_i1_ty], [IntrWriteMem, IntrInaccessibleMemOnly, IntrNoCallback, IntrNoFree]
2474 >;
2475 
2476 // Copies the active channels of the source value to the destination value,
2477 // with the guarantee that the source value is computed as if the entire
2478 // program were executed in Whole Wavefront Mode, i.e. with all channels
2479 // enabled, with a few exceptions; in particular, phi nodes that require WWM
2480 // return an undefined value.
2481 def int_amdgcn_strict_wwm : Intrinsic<[llvm_any_ty],
2482   [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable,
2483                        IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]
2484 >;
2485 // Deprecated. Use int_amdgcn_strict_wwm instead.
2486 def int_amdgcn_wwm : Intrinsic<[llvm_any_ty],
2487   [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable,
2488                        IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]
2489 >;
2490 def int_amdgcn_strict_wqm : Intrinsic<[llvm_any_ty],
2491   [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable,
2492                        IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]
2493 >;
2494 
2495 // Given a value, copies it while setting all the inactive lanes to a given
2496 // value. Note that OpenGL helper lanes are considered active, so if the
2497 // program ever uses WQM, then the instruction and the first source will be
2498 // computed in WQM.
2499 def int_amdgcn_set_inactive :
2500   Intrinsic<[llvm_any_ty],
2501             [LLVMMatchType<0>, // value to be copied
2502              LLVMMatchType<0>], // value for the inactive lanes to take
2503             [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
2504 
2505 // Similar to int_amdgcn_set_inactive, but the value for the inactive lanes must
2506 // be a VGPR function argument.
2507 // Can only be used in functions with the `amdgpu_cs_chain` or
2508 // `amdgpu_cs_chain_preserve` calling conventions, and only in uniform control
2509 // flow.
2510 def int_amdgcn_set_inactive_chain_arg :
2511   Intrinsic<[llvm_anyint_ty],
2512             [LLVMMatchType<0>, // value to be copied
2513              LLVMMatchType<0>], // value for the inactive lanes to take
2514             [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
2515 
2516 // Return true if the given flat pointer points to a local memory address.
2517 def int_amdgcn_is_shared : ClangBuiltin<"__builtin_amdgcn_is_shared">,
2518   DefaultAttrsIntrinsic<[llvm_i1_ty], [llvm_ptr_ty],
2519   [IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>]
2520 >;
2521 
2522 // Return true if the given flat pointer points to a private memory address.
2523 def int_amdgcn_is_private : ClangBuiltin<"__builtin_amdgcn_is_private">,
2524   DefaultAttrsIntrinsic<[llvm_i1_ty], [llvm_ptr_ty],
2525   [IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>]
2526 >;
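// For illustration, both take a flat (address space 0) pointer:
//   %in.lds     = call i1 @llvm.amdgcn.is.shared(ptr %p)
//   %in.scratch = call i1 @llvm.amdgcn.is.private(ptr %p)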
2527 
2528 // A uniform tail call to a function with the `amdgpu_cs_chain` or
2529 // `amdgpu_cs_chain_preserve` calling convention. It will populate the SGPRs
2530 // starting at s0 and the VGPRs starting at v8, set EXEC and perform a jump to
2531 // the given function.
2532 // Can only be used in functions with the `amdgpu_cs`, `amdgpu_cs_chain` or
2533 // `amdgpu_cs_chain_preserve` calling conventions, and only in uniform control
2534 // flow.
2535 def int_amdgcn_cs_chain:
2536   Intrinsic<[],
2537             [llvm_anyptr_ty, // The function to jump to.
2538              llvm_anyint_ty, // Value to put in EXEC (should be i32 or i64).
2539              llvm_any_ty, // Arguments that will be copied into SGPRs (s0+).
2540                           // Must be uniform.
2541              llvm_any_ty, // Arguments that will be copied into VGPRs (v8+).
2542                           // Need not be uniform.
2543              llvm_i32_ty, // Flags.
2544              llvm_vararg_ty // Additional arguments. Only present if Flags is
2545                             // non-zero.
2546             ],
2547             [IntrConvergent, IntrNoReturn, ImmArg<ArgIndex<4>>]>;
2548 
2549 
2550 //===----------------------------------------------------------------------===//
2551 // CI+ Intrinsics
2552 //===----------------------------------------------------------------------===//
2553 
2554 def int_amdgcn_s_dcache_inv_vol :
2555   ClangBuiltin<"__builtin_amdgcn_s_dcache_inv_vol">,
2556   DefaultAttrsIntrinsic<[], [], [IntrNoMem, IntrHasSideEffects]>;
2557 
2558 def int_amdgcn_buffer_wbinvl1_vol :
2559   ClangBuiltin<"__builtin_amdgcn_buffer_wbinvl1_vol">,
2560   DefaultAttrsIntrinsic<[], [], [IntrNoMem, IntrHasSideEffects]>;
2561 
2562 //===----------------------------------------------------------------------===//
2563 // VI Intrinsics
2564 //===----------------------------------------------------------------------===//
2565 
2566 // The llvm.amdgcn.mov.dpp intrinsic represents the mov.dpp operation in AMDGPU.
2567 // This operation is being deprecated and can be replaced with
2568 // llvm.amdgcn.update.dpp.
2569 // llvm.amdgcn.mov.dpp <src> <dpp_ctrl> <row_mask> <bank_mask> <bound_ctrl>
2570 def int_amdgcn_mov_dpp :
2571   Intrinsic<[llvm_anyint_ty],
2572             [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
2573              llvm_i1_ty],
2574              [IntrNoMem, IntrConvergent, IntrWillReturn,
2575              ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>,
2576              ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, IntrNoCallback, IntrNoFree]>;
2577 
2578 // The llvm.amdgcn.update.dpp intrinsic represents the update.dpp operation in
2579 // AMDGPU. It takes an old value, a source operand, a DPP control operand, a row
2580 // mask, a bank mask, and a bound control. This operation is equivalent to a
2581 // sequence of v_mov_b32 operations. It should be preferred over the
2582 // deprecated llvm.amdgcn.mov.dpp for all new uses.
2583 // llvm.amdgcn.update.dpp <old> <src> <dpp_ctrl> <row_mask> <bank_mask> <bound_ctrl>
2584 // Should be equivalent to:
2585 // v_mov_b32 <dest> <old>
2586 // v_mov_b32 <dest> <src> <dpp_ctrl> <row_mask> <bank_mask> <bound_ctrl>
2587 def int_amdgcn_update_dpp :
2588   Intrinsic<[llvm_any_ty],
2589             [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty,
2590             llvm_i32_ty, llvm_i32_ty, llvm_i1_ty],
2591              [IntrNoMem, IntrConvergent, IntrWillReturn,
2592               ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>,
2593               ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>, IntrNoCallback, IntrNoFree]>;
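// For illustration (the control values are only an example: dpp_ctrl = 0x111
// is row_shr:1, row_mask = bank_mask = 0xf, bound_ctrl = false):
//   %v = call i32 @llvm.amdgcn.update.dpp.i32(i32 %old, i32 %src, i32 273,
//                                             i32 15, i32 15, i1 false)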
2594 
2595 def int_amdgcn_s_dcache_wb :
2596   ClangBuiltin<"__builtin_amdgcn_s_dcache_wb">,
2597   Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
2598 
2599 def int_amdgcn_s_dcache_wb_vol :
2600   ClangBuiltin<"__builtin_amdgcn_s_dcache_wb_vol">,
2601   Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
2602 
2603 def int_amdgcn_s_memrealtime :
2604   ClangBuiltin<"__builtin_amdgcn_s_memrealtime">,
2605   Intrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrHasSideEffects, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
2606 
2607 // llvm.amdgcn.ds.permute <index> <src>
2608 def int_amdgcn_ds_permute :
2609   ClangBuiltin<"__builtin_amdgcn_ds_permute">,
2610   Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
2611     [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
2612 
2613 // llvm.amdgcn.ds.bpermute <index> <src>
2614 def int_amdgcn_ds_bpermute :
2615   ClangBuiltin<"__builtin_amdgcn_ds_bpermute">,
2616   Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
2617      [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
2618 
2619 // llvm.amdgcn.perm <src0> <src1> <selector>
2620 def int_amdgcn_perm :
2621   ClangBuiltin<"__builtin_amdgcn_perm">,
2622   Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
2623      [IntrNoMem, IntrSpeculatable, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
2624 
2625 //===----------------------------------------------------------------------===//
2626 // GFX9 Intrinsics
2627 //===----------------------------------------------------------------------===//
2628 
2629 class AMDGPUGlobalLoadLDS :
2630   ClangBuiltin<"__builtin_amdgcn_global_load_lds">,
2631   Intrinsic <
2632     [],
2633     [LLVMQualPointerType<1>,            // Base global pointer to load from
2634      LLVMQualPointerType<3>,            // LDS base pointer to store to
2635      llvm_i32_ty,                       // Data byte size: 1/2/4 (/12/16 for gfx950)
2636      llvm_i32_ty,                       // imm offset (applied to both global and LDS address)
2637      llvm_i32_ty],                      // auxiliary data (imm, cachepolicy (bit 0 = sc0,
2638                                         //                                   bit 1 = sc1,
2639                                         //                                   bit 4 = scc))
2640     [IntrWillReturn, NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>,
2641      ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, IntrNoCallback, IntrNoFree],
2642      "", [SDNPMemOperand]>;
2643 def int_amdgcn_global_load_lds : AMDGPUGlobalLoadLDS;
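// For illustration, a 4-byte copy with zero offset and default cache policy
// (the constant operands are only an example):
//   call void @llvm.amdgcn.global.load.lds(ptr addrspace(1) %gsrc,
//                                          ptr addrspace(3) %ldst,
//                                          i32 4, i32 0, i32 0)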
2644 
2645 // This is IntrHasSideEffects because it reads from a volatile hardware register.
2646 def int_amdgcn_pops_exiting_wave_id :
2647   DefaultAttrsIntrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrHasSideEffects]>;
2648 
2649 //===----------------------------------------------------------------------===//
2650 // GFX10 Intrinsics
2651 //===----------------------------------------------------------------------===//
2652 
2653 // llvm.amdgcn.permlane16 <old> <src0> <src1> <src2> <fi> <bound_control>
2654 def int_amdgcn_permlane16 :
2655   Intrinsic<[llvm_any_ty],
2656             [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty, llvm_i1_ty, llvm_i1_ty],
2657             [IntrNoMem, IntrConvergent, IntrWillReturn,
2658              ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>, IntrNoCallback, IntrNoFree]>;
2659 
2660 // llvm.amdgcn.permlanex16 <old> <src0> <src1> <src2> <fi> <bound_control>
2661 def int_amdgcn_permlanex16 :
2662   Intrinsic<[llvm_any_ty],
2663             [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty, llvm_i1_ty, llvm_i1_ty],
2664             [IntrNoMem, IntrConvergent, IntrWillReturn,
2665              ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>, IntrNoCallback, IntrNoFree]>;
2666 
2667 // llvm.amdgcn.mov.dpp8 <src> <sel>
2668 // <sel> is a 32-bit constant whose high 8 bits must be zero; it selects
2669 // the lanes to read from.
2670 def int_amdgcn_mov_dpp8 :
2671   Intrinsic<[llvm_any_ty],
2672             [LLVMMatchType<0>, llvm_i32_ty],
2673             [IntrNoMem, IntrConvergent, IntrWillReturn,
2674              ImmArg<ArgIndex<1>>, IntrNoCallback, IntrNoFree]>;
2675 
2676 def int_amdgcn_s_get_waveid_in_workgroup :
2677   ClangBuiltin<"__builtin_amdgcn_s_get_waveid_in_workgroup">,
2678   Intrinsic<[llvm_i32_ty], [],
2679     [NoUndef<RetIndex>, IntrNoMem, IntrHasSideEffects, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
2680 
2681 class AMDGPUAtomicRtn<LLVMType vt, LLVMType pt = llvm_anyptr_ty> : Intrinsic <
2682   [vt],
2683   [pt,  // vaddr
2684    vt], // vdata(VGPR)
2685   [IntrArgMemOnly, IntrWillReturn, NoCapture<ArgIndex<0>>, IntrNoCallback, IntrNoFree], "",
2686   [SDNPMemOperand]>;
2687 
2688 def int_amdgcn_global_atomic_csub : AMDGPUAtomicRtn<llvm_i32_ty>;
2689 
2690 // uint4 llvm.amdgcn.image.bvh.intersect.ray <node_ptr>, <ray_extent>, <ray_origin>,
2691 //                                           <ray_dir>, <ray_inv_dir>, <texture_descr>
2692 // <node_ptr> is i32 or i64.
2693 // <ray_dir> and <ray_inv_dir> are both v3f16 or both v3f32.
2694 def int_amdgcn_image_bvh_intersect_ray :
2695   DefaultAttrsIntrinsic<[llvm_v4i32_ty],
2696             [llvm_anyint_ty, llvm_float_ty, llvm_v3f32_ty, llvm_anyvector_ty,
2697              LLVMMatchType<1>, llvm_v4i32_ty],
2698             [IntrReadMem, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
2699 
2700 //===----------------------------------------------------------------------===//
2701 // GFX11 Intrinsics
2702 //===----------------------------------------------------------------------===//
2703 
2704 // llvm.amdgcn.permlane64 <src0>
2705 def int_amdgcn_permlane64 :
2706   Intrinsic<[llvm_any_ty], [LLVMMatchType<0>],
2707             [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
2708 
2709 def int_amdgcn_ds_add_gs_reg_rtn :
2710   ClangBuiltin<"__builtin_amdgcn_ds_add_gs_reg_rtn">,
2711   Intrinsic<[llvm_anyint_ty], [llvm_i32_ty, llvm_i32_ty],
2712             [ImmArg<ArgIndex<1>>, IntrHasSideEffects, IntrWillReturn, IntrNoCallback, IntrNoFree],
2713             "", [SDNPMemOperand]>;
2714 
2715 def int_amdgcn_ds_sub_gs_reg_rtn :
2716   ClangBuiltin<"__builtin_amdgcn_ds_sub_gs_reg_rtn">,
2717   Intrinsic<[llvm_anyint_ty], [llvm_i32_ty, llvm_i32_ty],
2718             [ImmArg<ArgIndex<1>>, IntrHasSideEffects, IntrWillReturn, IntrNoCallback, IntrNoFree],
2719             "", [SDNPMemOperand]>;
2720 
2721 def int_amdgcn_ds_bvh_stack_rtn :
2722   Intrinsic<
2723     [llvm_i32_ty, llvm_i32_ty], // %vdst, %addr
2724     [
2725       llvm_i32_ty,   // %addr
2726       llvm_i32_ty,   // %data0
2727       llvm_v4i32_ty, // %data1
2728       llvm_i32_ty,   // %offset
2729     ],
2730     [ImmArg<ArgIndex<3>>, IntrWillReturn, IntrNoCallback, IntrNoFree]
2731   >;
2732 
2733 def int_amdgcn_s_wait_event_export_ready :
2734   ClangBuiltin<"__builtin_amdgcn_s_wait_event_export_ready">,
2735   Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrWillReturn]
2736 >;
2737 
2738 // WMMA (Wave Matrix Multiply-Accumulate) intrinsics
2739 //
2740 // These operations perform a matrix multiplication and accumulation of
2741 // the form: D = A * B + C.
2742 
2743 class AMDGPUWmmaIntrinsic<LLVMType AB, LLVMType CD> :
2744   Intrinsic<
2745     [CD], // %D
2746     [
2747       AB,               // %A
2748       LLVMMatchType<1>, // %B
2749       LLVMMatchType<0>, // %C
2750     ],
2751     [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]
2752 >;
2753 
2754 class AMDGPUWmmaIntrinsicOPSEL<LLVMType AB, LLVMType CD> :
2755   Intrinsic<
2756     [CD], // %D
2757     [
2758       AB,               // %A
2759       LLVMMatchType<1>, // %B
2760       LLVMMatchType<0>, // %C
2761       llvm_i1_ty,       // %high (op_sel) for GFX11, 0 for GFX12
2762     ],
2763     [IntrNoMem, IntrConvergent, ImmArg<ArgIndex<3>>, IntrWillReturn, IntrNoCallback, IntrNoFree]
2764 >;
2765 
2766 class AMDGPUWmmaIntrinsicIU<LLVMType AB, LLVMType CD> :
2767   Intrinsic<
2768     [CD], // %D
2769     [
2770       llvm_i1_ty,       // %A_sign
2771       AB,               // %A
2772       llvm_i1_ty,       // %B_sign
2773       LLVMMatchType<1>, // %B
2774       LLVMMatchType<0>, // %C
2775       llvm_i1_ty,       // %clamp
2776     ],
2777     [IntrNoMem, IntrConvergent, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<5>>, IntrWillReturn, IntrNoCallback, IntrNoFree]
2778 >;
2779 
2780 // WMMA GFX11Only
2781 
2782 // The OPSEL intrinsics read from and write to one half of the registers, selected by the op_sel bit.
2783 // The tied versions of the f16/bf16 wmma intrinsics tie the destination matrix registers to the input accumulator registers.
2784 // The content of the other 16-bit half is preserved from the input.
2785 
2786 defset list<Intrinsic> AMDGPUWMMAIntrinsicsGFX11 = {
2787 def int_amdgcn_wmma_f16_16x16x16_f16_tied   : AMDGPUWmmaIntrinsicOPSEL<llvm_anyfloat_ty, llvm_anyfloat_ty>;
2788 def int_amdgcn_wmma_bf16_16x16x16_bf16_tied : AMDGPUWmmaIntrinsicOPSEL<llvm_anyint_ty, llvm_anyint_ty>;
2789 
2790 // WMMA GFX11Plus
2791 
2792 def int_amdgcn_wmma_f32_16x16x16_f16   : AMDGPUWmmaIntrinsic<llvm_anyfloat_ty, llvm_anyfloat_ty>;
2793 def int_amdgcn_wmma_f32_16x16x16_bf16  : AMDGPUWmmaIntrinsic<llvm_anyint_ty, llvm_anyfloat_ty>;
2794 def int_amdgcn_wmma_i32_16x16x16_iu8   : AMDGPUWmmaIntrinsicIU<llvm_anyint_ty, llvm_anyint_ty>;
2795 def int_amdgcn_wmma_i32_16x16x16_iu4   : AMDGPUWmmaIntrinsicIU<llvm_anyint_ty, llvm_anyint_ty>;
2796 
2797 // GFX11: The OPSEL intrinsics read from and write to one half of the registers, selected by the op_sel bit.
2798 //        The content of the other 16-bit half is undefined.
2799 // GFX12: The op_sel bit must be 0.
2800 def int_amdgcn_wmma_f16_16x16x16_f16   : AMDGPUWmmaIntrinsicOPSEL<llvm_anyfloat_ty, llvm_anyfloat_ty>;
2801 def int_amdgcn_wmma_bf16_16x16x16_bf16 : AMDGPUWmmaIntrinsicOPSEL<llvm_anyint_ty, llvm_anyint_ty>;
2802 }
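// For illustration, one plausible wave32 GFX11 shape of the f32 <- f16 wmma
// (the vector widths depend on wave size and target):
//   %D = call <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v8f32.v16f16(
//            <16 x half> %A, <16 x half> %B, <8 x float> %C)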
2803 
2804 //===----------------------------------------------------------------------===//
2805 // GFX12 Intrinsics
2806 //===----------------------------------------------------------------------===//
2807 
2808 // llvm.amdgcn.permlane16.var <old> <src0> <src1> <fi> <bound_control>
2809 def int_amdgcn_permlane16_var : ClangBuiltin<"__builtin_amdgcn_permlane16_var">,
2810   Intrinsic<[llvm_i32_ty],
2811             [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i1_ty, llvm_i1_ty],
2812             [IntrNoMem, IntrConvergent, IntrWillReturn,
2813              ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, IntrNoCallback, IntrNoFree]>;
2814 
2815 // llvm.amdgcn.permlanex16.var <old> <src0> <src1> <fi> <bound_control>
2816 def int_amdgcn_permlanex16_var : ClangBuiltin<"__builtin_amdgcn_permlanex16_var">,
2817   Intrinsic<[llvm_i32_ty],
2818             [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i1_ty, llvm_i1_ty],
2819             [IntrNoMem, IntrConvergent, IntrWillReturn,
2820              ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, IntrNoCallback, IntrNoFree]>;
2821 
2822 // SWMMAC (Wave Matrix (sparse) Multiply-Accumulate) intrinsics
2823 //
2824 // These operations perform a sparse matrix multiplication and accumulation of
2825 // the form: D = A * B + C.
2826 // A is a sparse matrix, half the size of B, and is expanded using the sparsity index.
2827 
2828 class AMDGPUSWmmacIntrinsicIdx<LLVMType A, LLVMType B, LLVMType CD, LLVMType Index> :
2829   Intrinsic<
2830     [CD],               // %D
2831     [
2832       A,                // %A
2833       B,                // %B
2834       LLVMMatchType<0>, // %C
2835       Index             // %Sparsity index for A
2836     ],
2837     [IntrNoMem, IntrConvergent, IntrWillReturn]
2838 >;
2839 
2840 class AMDGPUSWmmacIntrinsicIUIdx<LLVMType A, LLVMType B, LLVMType CD, LLVMType Index> :
2841   Intrinsic<
2842     [CD],               // %D
2843     [
2844       llvm_i1_ty,       // %A_sign
2845       A,                // %A
2846       llvm_i1_ty,       // %B_sign
2847       B,                // %B
2848       LLVMMatchType<0>, // %C
2849       Index,            // %Sparsity index for A
2850       llvm_i1_ty,       // %clamp
2851     ],
2852     [IntrNoMem, IntrConvergent, IntrWillReturn, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<6>>]
2853 >;
2854 
2855 defset list<Intrinsic> AMDGPUWMMAIntrinsicsGFX12 = {
2856 // WMMA (Wave Matrix Multiply-Accumulate) intrinsics
2857 //
2858 // These operations perform a matrix multiplication and accumulation of
2859 // the form: D = A * B + C.
2860 
2861 // A and B are <8 x fp8> or <8 x bf8>, but since fp8 and bf8 are not supported by LLVM, <2 x i32> is used instead.
2862 def int_amdgcn_wmma_f32_16x16x16_fp8_fp8 : AMDGPUWmmaIntrinsic<llvm_anyint_ty, llvm_anyfloat_ty>;
2863 def int_amdgcn_wmma_f32_16x16x16_fp8_bf8 : AMDGPUWmmaIntrinsic<llvm_anyint_ty, llvm_anyfloat_ty>;
2864 def int_amdgcn_wmma_f32_16x16x16_bf8_fp8 : AMDGPUWmmaIntrinsic<llvm_anyint_ty, llvm_anyfloat_ty>;
2865 def int_amdgcn_wmma_f32_16x16x16_bf8_bf8 : AMDGPUWmmaIntrinsic<llvm_anyint_ty, llvm_anyfloat_ty>;
2866 // A and B are <16 x iu4>.
2867 def int_amdgcn_wmma_i32_16x16x32_iu4     : AMDGPUWmmaIntrinsicIU<llvm_anyint_ty, llvm_anyint_ty>;
2868 
2869 def int_amdgcn_swmmac_f32_16x16x32_f16     : AMDGPUSWmmacIntrinsicIdx<llvm_anyfloat_ty, llvm_anyfloat_ty, llvm_anyfloat_ty, llvm_anyint_ty>;
2870 def int_amdgcn_swmmac_f32_16x16x32_bf16    : AMDGPUSWmmacIntrinsicIdx<llvm_anyint_ty, llvm_anyint_ty, llvm_anyfloat_ty, llvm_anyint_ty>;
2871 def int_amdgcn_swmmac_f16_16x16x32_f16     : AMDGPUSWmmacIntrinsicIdx<llvm_anyfloat_ty, llvm_anyfloat_ty, llvm_anyfloat_ty, llvm_anyint_ty>;
2872 def int_amdgcn_swmmac_bf16_16x16x32_bf16   : AMDGPUSWmmacIntrinsicIdx<llvm_anyint_ty, llvm_anyint_ty, llvm_anyint_ty, llvm_anyint_ty>;
2873 def int_amdgcn_swmmac_i32_16x16x32_iu8     : AMDGPUSWmmacIntrinsicIUIdx<llvm_anyint_ty, llvm_anyint_ty, llvm_anyint_ty, llvm_anyint_ty>;
2874 def int_amdgcn_swmmac_i32_16x16x32_iu4     : AMDGPUSWmmacIntrinsicIUIdx<llvm_anyint_ty, llvm_anyint_ty, llvm_anyint_ty, llvm_anyint_ty>;
2875 def int_amdgcn_swmmac_i32_16x16x64_iu4     : AMDGPUSWmmacIntrinsicIUIdx<llvm_anyint_ty, llvm_anyint_ty, llvm_anyint_ty, llvm_anyint_ty>;
2876 def int_amdgcn_swmmac_f32_16x16x32_fp8_fp8 : AMDGPUSWmmacIntrinsicIdx<llvm_anyint_ty, llvm_anyint_ty, llvm_anyfloat_ty, llvm_anyint_ty>;
2877 def int_amdgcn_swmmac_f32_16x16x32_fp8_bf8 : AMDGPUSWmmacIntrinsicIdx<llvm_anyint_ty, llvm_anyint_ty, llvm_anyfloat_ty, llvm_anyint_ty>;
2878 def int_amdgcn_swmmac_f32_16x16x32_bf8_fp8 : AMDGPUSWmmacIntrinsicIdx<llvm_anyint_ty, llvm_anyint_ty, llvm_anyfloat_ty, llvm_anyint_ty>;
2879 def int_amdgcn_swmmac_f32_16x16x32_bf8_bf8 : AMDGPUSWmmacIntrinsicIdx<llvm_anyint_ty, llvm_anyint_ty, llvm_anyfloat_ty, llvm_anyint_ty>;
2880 }
2881 
2882 def int_amdgcn_global_atomic_ordered_add_b64 : AMDGPUAtomicRtn<llvm_i64_ty, global_ptr_ty>;
2883 
2884 def int_amdgcn_flat_atomic_fmin_num   : AMDGPUAtomicRtn<llvm_anyfloat_ty>;
2885 def int_amdgcn_flat_atomic_fmax_num   : AMDGPUAtomicRtn<llvm_anyfloat_ty>;
2886 def int_amdgcn_global_atomic_fmin_num : AMDGPUAtomicRtn<llvm_anyfloat_ty>;
2887 def int_amdgcn_global_atomic_fmax_num : AMDGPUAtomicRtn<llvm_anyfloat_ty>;
2888 
2889 def int_amdgcn_atomic_cond_sub_u32 : AMDGPUAtomicRtn<llvm_i32_ty>;
2890 
2891 class AMDGPULoadIntrinsic<LLVMType ptr_ty>:
2892   Intrinsic<
2893     [llvm_any_ty],
2894     [ptr_ty],
2895     [IntrReadMem, IntrWillReturn, IntrConvergent, NoCapture<ArgIndex<0>>, IntrNoCallback, IntrNoFree],
2896     "",
2897     [SDNPMemOperand]
2898   >;
2899 
2900 // Wave32
2901 // <2 x i32>    @llvm.amdgcn.global.load.tr.b64.v2i32(ptr addrspace(1))  -> global_load_tr_b64
2902 // <8 x i16>    @llvm.amdgcn.global.load.tr.b128.v8i16(ptr addrspace(1))  -> global_load_tr_b128
2903 // Wave64
2904 // i32          @llvm.amdgcn.global.load.tr.b64.i32(ptr addrspace(1))    -> global_load_tr_b64
2905 // <4 x i16>    @llvm.amdgcn.global.load.tr.b128.v4i16(ptr addrspace(1))  -> global_load_tr_b128
2906 
2907 def int_amdgcn_global_load_tr_b64  : AMDGPULoadIntrinsic<global_ptr_ty>;
2908 def int_amdgcn_global_load_tr_b128 : AMDGPULoadIntrinsic<global_ptr_ty>;
2909 def int_amdgcn_ds_read_tr4_b64     : AMDGPULoadIntrinsic<local_ptr_ty>;
2910 def int_amdgcn_ds_read_tr6_b96     : AMDGPULoadIntrinsic<local_ptr_ty>;
2911 def int_amdgcn_ds_read_tr8_b64     : AMDGPULoadIntrinsic<local_ptr_ty>;
2912 def int_amdgcn_ds_read_tr16_b64    : AMDGPULoadIntrinsic<local_ptr_ty>;
2913 
2914 // i32 @llvm.amdgcn.wave.id()
2915 def int_amdgcn_wave_id :
2916   DefaultAttrsIntrinsic<[llvm_i32_ty], [], [NoUndef<RetIndex>, IntrNoMem, IntrSpeculatable]>;
2917 
2918 def int_amdgcn_s_prefetch_data :
2919   Intrinsic<[],
2920   [llvm_anyptr_ty, // Pointer to constant/global memory
2921    llvm_i32_ty],   // Length to prefetch: 0-31 (1-32 chunks, in units of 128 bytes)
2922     [IntrInaccessibleMemOrArgMemOnly, IntrWillReturn, NoCapture<ArgIndex<0>>, IntrNoCallback, IntrNoFree],
2923     "", [SDNPMemOperand]
2924   >;
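// For illustration, a prefetch through a constant (addrspace 4) pointer with
// the minimum length (the length value is only an example):
//   call void @llvm.amdgcn.s.prefetch.data.p4(ptr addrspace(4) %p, i32 0)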
2925 
2926 //===----------------------------------------------------------------------===//
2927 // Deep learning intrinsics.
2928 //===----------------------------------------------------------------------===//
2929 
2930 // f32 %r = llvm.amdgcn.fdot2(v2f16 %a, v2f16 %b, f32 %c, i1 %clamp)
2931 //   %r = %a[0] * %b[0] + %a[1] * %b[1] + %c
2932 def int_amdgcn_fdot2 :
2933   ClangBuiltin<"__builtin_amdgcn_fdot2">,
2934   DefaultAttrsIntrinsic<
2935     [llvm_float_ty], // %r
2936     [
2937       llvm_v2f16_ty, // %a
2938       llvm_v2f16_ty, // %b
2939       llvm_float_ty, // %c
2940       llvm_i1_ty     // %clamp
2941     ],
2942     [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<3>>]
2943   >;
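// For illustration (clamp disabled):
//   %r = call float @llvm.amdgcn.fdot2(<2 x half> %a, <2 x half> %b,
//                                      float %c, i1 false)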
2944 
2945 // f16 %r = llvm.amdgcn.fdot2.f16.f16(v2f16 %a, v2f16 %b, f16 %c)
2946 //   %r = %a[0] * %b[0] + %a[1] * %b[1] + %c
2947 def int_amdgcn_fdot2_f16_f16 :
2948   ClangBuiltin<"__builtin_amdgcn_fdot2_f16_f16">,
2949   DefaultAttrsIntrinsic<
2950     [llvm_half_ty],  // %r
2951     [
2952       llvm_v2f16_ty, // %a
2953       llvm_v2f16_ty, // %b
2954       llvm_half_ty   // %c
2955     ],
2956     [IntrNoMem, IntrSpeculatable]
2957   >;
2958 
2959 // bf16 %r = llvm.amdgcn.fdot2.bf16.bf16(v2bf16 %a, v2bf16 %b, bf16 %c)
2960 //   %r = %a[0] * %b[0] + %a[1] * %b[1] + %c
2961 def int_amdgcn_fdot2_bf16_bf16 :
2962   ClangBuiltin<"__builtin_amdgcn_fdot2_bf16_bf16">,
2963   DefaultAttrsIntrinsic<
2964     [llvm_bfloat_ty],   // %r
2965     [
2966       llvm_v2bf16_ty, // %a
2967       llvm_v2bf16_ty, // %b
2968       llvm_bfloat_ty    // %c
2969     ],
2970     [IntrNoMem, IntrSpeculatable]
2971   >;
2972 
2973 // f32 %r = llvm.amdgcn.fdot2.f32.bf16(v2bf16 %a, v2bf16 %b, f32 %c, i1 %clamp)
2974 //   %r = %a[0] * %b[0] + %a[1] * %b[1] + %c
2975 def int_amdgcn_fdot2_f32_bf16 :
2976   ClangBuiltin<"__builtin_amdgcn_fdot2_f32_bf16">,
2977   DefaultAttrsIntrinsic<
2978     [llvm_float_ty], // %r
2979     [
2980       llvm_v2bf16_ty, // %a
2981       llvm_v2bf16_ty, // %b
2982       llvm_float_ty, // %c
2983       llvm_i1_ty     // %clamp
2984     ],
2985     [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<3>>]
2986   >;
2987 
2988 // f32 %r = llvm.amdgcn.fdot2c.f32.bf16(v2bf16 %a, v2bf16 %b, f32 %c, i1 %clamp)
2989 //   %r = %a[0] * %b[0] + %a[1] * %b[1] + %c
2990 // TODO: This is actually similar to the llvm.amdgcn.fdot2 intrinsic, which
2991 // produces v_dot2c_f32_f16 on gfx940. Maybe we can consolidate these.
2992 
2993 def int_amdgcn_fdot2c_f32_bf16 :
2994   ClangBuiltin<"__builtin_amdgcn_fdot2c_f32_bf16">,
2995   DefaultAttrsIntrinsic<
2996     [llvm_float_ty], // %r
2997     [
2998       llvm_v2bf16_ty, // %a
2999       llvm_v2bf16_ty, // %b
3000       llvm_float_ty, // %c
3001       llvm_i1_ty     // %clamp
3002     ],
3003     [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<3>>]
3004   >;
3005 
3006 // i32 %r = llvm.amdgcn.sdot2(v2i16 %a, v2i16 %b, i32 %c, i1 %clamp)
3007 //   %r = %a[0] * %b[0] + %a[1] * %b[1] + %c
3008 def int_amdgcn_sdot2 :
3009   ClangBuiltin<"__builtin_amdgcn_sdot2">,
3010   DefaultAttrsIntrinsic<
3011     [llvm_i32_ty], // %r
3012     [
3013       llvm_v2i16_ty, // %a
3014       llvm_v2i16_ty, // %b
3015       llvm_i32_ty,   // %c
3016       llvm_i1_ty     // %clamp
3017     ],
3018     [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<3>>]
3019   >;
3020 
3021 // u32 %r = llvm.amdgcn.udot2(v2u16 %a, v2u16 %b, u32 %c, i1 %clamp)
3022 //   %r = %a[0] * %b[0] + %a[1] * %b[1] + %c
3023 def int_amdgcn_udot2 :
3024   ClangBuiltin<"__builtin_amdgcn_udot2">,
3025   DefaultAttrsIntrinsic<
3026     [llvm_i32_ty], // %r
3027     [
3028       llvm_v2i16_ty, // %a
3029       llvm_v2i16_ty, // %b
3030       llvm_i32_ty,   // %c
3031       llvm_i1_ty     // %clamp
3032     ],
3033     [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<3>>]
3034   >;
3035 
3036 // i32 %r = llvm.amdgcn.sdot4(v4i8 (as i32) %a, v4i8 (as i32) %b, i32 %c, i1 %clamp)
3037 //   %r = %a[0] * %b[0] + %a[1] * %b[1] + %a[2] * %b[2] + %a[3] * %b[3] + %c
3038 def int_amdgcn_sdot4 :
3039   ClangBuiltin<"__builtin_amdgcn_sdot4">,
3040   DefaultAttrsIntrinsic<
3041     [llvm_i32_ty], // %r
3042     [
3043       llvm_i32_ty, // %a
3044       llvm_i32_ty, // %b
3045       llvm_i32_ty, // %c
3046       llvm_i1_ty   // %clamp
3047     ],
3048     [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<3>>]
3049   >;
3050 
3051 // u32 %r = llvm.amdgcn.udot4(v4u8 (as u32) %a, v4u8 (as u32) %b, u32 %c, i1 %clamp)
3052 //   %r = %a[0] * %b[0] + %a[1] * %b[1] + %a[2] * %b[2] + %a[3] * %b[3] + %c
3053 def int_amdgcn_udot4 :
3054   ClangBuiltin<"__builtin_amdgcn_udot4">,
3055   DefaultAttrsIntrinsic<
3056     [llvm_i32_ty], // %r
3057     [
3058       llvm_i32_ty, // %a
3059       llvm_i32_ty, // %b
3060       llvm_i32_ty, // %c
3061       llvm_i1_ty   // %clamp
3062     ],
3063     [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<3>>]
3064   >;
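// For illustration (the four bytes of %a and %b are treated as u8 lanes;
// clamp disabled):
//   %r = call i32 @llvm.amdgcn.udot4(i32 %a, i32 %b, i32 %c, i1 false)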
3065 
3066 // i32 %r = llvm.amdgcn.sudot4(i1 %a_sign, v4i8 (as i32) %a, i1 %b_sign, v4i8 (as i32) %b, i32 %c, i1 %clamp)
3067 // Treat input as signed (_sign = 1) or unsigned (_sign = 0).
3068 // a[i in 0...3] = (%a_sign ? a.i8[i] : promoteToSigned(a.u8[i]));
3069 // b[i in 0...3] = (%b_sign ? b.i8[i] : promoteToSigned(b.u8[i]));
3070 //   %r = %a[0] * %b[0] + %a[1] * %b[1] + %a[2] * %b[2] + %a[3] * %b[3] + %c
3071 def int_amdgcn_sudot4 :
3072   ClangBuiltin<"__builtin_amdgcn_sudot4">,
3073   DefaultAttrsIntrinsic<
3074     [llvm_i32_ty], // %r
3075     [
3076       llvm_i1_ty,  // %a_sign
3077       llvm_i32_ty, // %a
3078       llvm_i1_ty,  // %b_sign
3079       llvm_i32_ty, // %b
3080       llvm_i32_ty, // %c
3081       llvm_i1_ty   // %clamp
3082     ],
3083     [IntrNoMem, IntrSpeculatable,
3084      ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<5>>]
3085   >;
3086 
3087 // i32 %r = llvm.amdgcn.sdot8(v8i4 (as i32) %a, v8i4 (as i32) %b, i32 %c, i1 %clamp)
3088 //   %r = %a[0] * %b[0] + %a[1] * %b[1] + %a[2] * %b[2] + %a[3] * %b[3] +
3089 //        %a[4] * %b[4] + %a[5] * %b[5] + %a[6] * %b[6] + %a[7] * %b[7] + %c
3090 def int_amdgcn_sdot8 :
3091   ClangBuiltin<"__builtin_amdgcn_sdot8">,
3092   DefaultAttrsIntrinsic<
3093     [llvm_i32_ty], // %r
3094     [
3095       llvm_i32_ty, // %a
3096       llvm_i32_ty, // %b
3097       llvm_i32_ty, // %c
3098       llvm_i1_ty   // %clamp
3099     ],
3100     [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<3>>]
3101   >;
3102 
3103 // u32 %r = llvm.amdgcn.udot8(v8u4 (as u32) %a, v8u4 (as u32) %b, u32 %c, i1 %clamp)
3104 //   %r = %a[0] * %b[0] + %a[1] * %b[1] + %a[2] * %b[2] + %a[3] * %b[3] +
3105 //        %a[4] * %b[4] + %a[5] * %b[5] + %a[6] * %b[6] + %a[7] * %b[7] + %c
3106 def int_amdgcn_udot8 :
3107   ClangBuiltin<"__builtin_amdgcn_udot8">,
3108   DefaultAttrsIntrinsic<
3109     [llvm_i32_ty], // %r
3110     [
3111       llvm_i32_ty, // %a
3112       llvm_i32_ty, // %b
3113       llvm_i32_ty, // %c
3114       llvm_i1_ty   // %clamp
3115     ],
3116     [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<3>>]
3117   >;
3118 
3119 // i32 %r = llvm.amdgcn.sudot8(i1 %a_sign, v8i4 (as i32) %a, i1 %b_sign, v8i4 (as i32) %b, i32 %c, i1 %clamp)
3120 // Treat input as signed (_sign = 1) or unsigned (_sign = 0).
3121 // a[i in 0...7] = (%a_sign ? a.i4[i] : promoteToSigned(a.u4[i]));
3122 // b[i in 0...7] = (%b_sign ? b.i4[i] : promoteToSigned(b.u4[i]));
3123 //   %r = %a[0] * %b[0] + %a[1] * %b[1] + %a[2] * %b[2] + %a[3] * %b[3] +
3124 //        %a[4] * %b[4] + %a[5] * %b[5] + %a[6] * %b[6] + %a[7] * %b[7] + %c
3125 def int_amdgcn_sudot8 :
3126   ClangBuiltin<"__builtin_amdgcn_sudot8">,
3127   DefaultAttrsIntrinsic<
3128     [llvm_i32_ty], // %r
3129     [
3130       llvm_i1_ty,  // %a_sign
3131       llvm_i32_ty, // %a
3132       llvm_i1_ty,  // %b_sign
3133       llvm_i32_ty, // %b
3134       llvm_i32_ty, // %c
3135       llvm_i1_ty   // %clamp
3136     ],
3137     [IntrNoMem, IntrSpeculatable,
3138      ImmArg<ArgIndex<0>>,  ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<5>>]
3139   >;
3140 
3141 // f32 %r = llvm.amdgcn.dot4.f32.type_a.type_b (v4type_a (as i32) %a, v4type_b (as i32) %b, f32 %c)
3142 //   %r = %a[0] * %b[0] + %a[1] * %b[1] + %a[2] * %b[2] + %a[3] * %b[3] + %c
3143 class AMDGPU8bitFloatDot4Intrinsic :
3144   ClangBuiltin<!subst("int", "__builtin", NAME)>,
3145   DefaultAttrsIntrinsic<
3146     [llvm_float_ty], // %r
3147     [
3148       llvm_i32_ty,   // %a
3149       llvm_i32_ty,   // %b
3150       llvm_float_ty, // %c
3151     ],
3152     [IntrNoMem, IntrSpeculatable]
3153   >;
3154 
3155 def int_amdgcn_dot4_f32_fp8_bf8 : AMDGPU8bitFloatDot4Intrinsic;
3156 def int_amdgcn_dot4_f32_bf8_fp8 : AMDGPU8bitFloatDot4Intrinsic;
3157 def int_amdgcn_dot4_f32_fp8_fp8 : AMDGPU8bitFloatDot4Intrinsic;
3158 def int_amdgcn_dot4_f32_bf8_bf8 : AMDGPU8bitFloatDot4Intrinsic;
3159 
3160 //===----------------------------------------------------------------------===//
3161 // gfx908 intrinsics
3162 //===----------------------------------------------------------------------===//
3163 
3164 // llvm.amdgcn.mfma.*.* vdst, srcA, srcB, srcC, cbsz, abid, blgp
3165 class AMDGPUMfmaIntrinsic<LLVMType DestTy, LLVMType SrcABTy> :
3166   ClangBuiltin<!subst("int", "__builtin", NAME)>,
3167   DefaultAttrsIntrinsic<[DestTy],
3168             [SrcABTy, SrcABTy, DestTy,
3169              llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
3170             [IntrConvergent, IntrNoMem,
3171              ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
3172 
3173 
3174 // srcA's format is determined by cbsz. srcB's format is determined by
3175 // blgp.
3176 //
3177 // These should be <8 x i32> for f8 formats, <6 x i32> for f6 formats,
3178 // and <4 x i32> for f4 formats. It is invalid to use a format that
3179 // requires more registers than the corresponding vector type (e.g. it
3180 // is illegal to use <6 x i32> in operand 0 if cbsz specifies an f8
3181 // format that requires 8 registers).
3182 class AMDGPUMfmaScaleIntrinsic<LLVMType DestTy> :
3183   DefaultAttrsIntrinsic<[DestTy],
3184             [llvm_anyvector_ty, llvm_anyvector_ty, DestTy,
3185              llvm_i32_ty, // cbsz
3186              llvm_i32_ty, // blgp
3187              // llvm_i1_ty, // TODO: neg_src2
3188              // llvm_i1_ty, // TODO: abs_src2
3189              // llvm_i1_ty, // TODO: clamp
3190              llvm_i32_ty, // op_sel (A matrix scale, 2-bits) // TODO: Make i2?
3191              llvm_i32_ty, // v_mfma_ld_scale_b32 src0 (A matrix scale)
3192              llvm_i32_ty, // op_sel (B matrix scale, 2-bits) // TODO: Make i2?
3193              llvm_i32_ty  // v_mfma_ld_scale_b32 src1 (B matrix scale)
3194             ],
3195             [IntrConvergent, IntrNoMem,
3196              ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>,
3197              ImmArg<ArgIndex<5>>, ImmArg<ArgIndex<7>>
3198              ]>;
3199 
3200 defset list<Intrinsic> AMDGPUMFMAIntrinsics908 = {
3201 def int_amdgcn_mfma_f32_32x32x1f32  : AMDGPUMfmaIntrinsic<llvm_v32f32_ty, llvm_float_ty>;
3202 def int_amdgcn_mfma_f32_16x16x1f32  : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_float_ty>;
3203 def int_amdgcn_mfma_f32_4x4x1f32    : AMDGPUMfmaIntrinsic<llvm_v4f32_ty,  llvm_float_ty>;
3204 def int_amdgcn_mfma_f32_32x32x2f32  : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_float_ty>;
3205 def int_amdgcn_mfma_f32_16x16x4f32  : AMDGPUMfmaIntrinsic<llvm_v4f32_ty,  llvm_float_ty>;
3206 def int_amdgcn_mfma_f32_32x32x4f16  : AMDGPUMfmaIntrinsic<llvm_v32f32_ty, llvm_v4f16_ty>;
3207 def int_amdgcn_mfma_f32_16x16x4f16  : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_v4f16_ty>;
3208 def int_amdgcn_mfma_f32_4x4x4f16    : AMDGPUMfmaIntrinsic<llvm_v4f32_ty,  llvm_v4f16_ty>;
3209 def int_amdgcn_mfma_f32_32x32x8f16  : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_v4f16_ty>;
3210 def int_amdgcn_mfma_f32_16x16x16f16 : AMDGPUMfmaIntrinsic<llvm_v4f32_ty,  llvm_v4f16_ty>;
3211 def int_amdgcn_mfma_i32_32x32x4i8   : AMDGPUMfmaIntrinsic<llvm_v32i32_ty, llvm_i32_ty>;
3212 def int_amdgcn_mfma_i32_16x16x4i8   : AMDGPUMfmaIntrinsic<llvm_v16i32_ty, llvm_i32_ty>;
3213 def int_amdgcn_mfma_i32_4x4x4i8     : AMDGPUMfmaIntrinsic<llvm_v4i32_ty,  llvm_i32_ty>;
3214 def int_amdgcn_mfma_i32_32x32x8i8   : AMDGPUMfmaIntrinsic<llvm_v16i32_ty, llvm_i32_ty>;
3215 def int_amdgcn_mfma_i32_16x16x16i8  : AMDGPUMfmaIntrinsic<llvm_v4i32_ty,  llvm_i32_ty>;
3216 def int_amdgcn_mfma_f32_32x32x2bf16 : AMDGPUMfmaIntrinsic<llvm_v32f32_ty, llvm_v2i16_ty>;
3217 def int_amdgcn_mfma_f32_16x16x2bf16 : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_v2i16_ty>;
3218 def int_amdgcn_mfma_f32_4x4x2bf16   : AMDGPUMfmaIntrinsic<llvm_v4f32_ty,  llvm_v2i16_ty>;
3219 def int_amdgcn_mfma_f32_32x32x4bf16 : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_v2i16_ty>;
3220 def int_amdgcn_mfma_f32_16x16x8bf16 : AMDGPUMfmaIntrinsic<llvm_v4f32_ty,  llvm_v2i16_ty>;
3221 }
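// For illustration, the 32x32x1 f32 variant defined above might be called as
// follows (cbsz, abid and blgp left at 0):
//   %d = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float %a, float %b,
//            <32 x float> %c, i32 0, i32 0, i32 0)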
3222 
3223 //===----------------------------------------------------------------------===//
3224 // gfx90a intrinsics
3225 //===----------------------------------------------------------------------===//
3226 
3227 defset list<Intrinsic> AMDGPUMFMAIntrinsics90A = {
3228 def int_amdgcn_mfma_f32_32x32x4bf16_1k  : AMDGPUMfmaIntrinsic<llvm_v32f32_ty, llvm_v4i16_ty>;
3229 def int_amdgcn_mfma_f32_16x16x4bf16_1k  : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_v4i16_ty>;
3230 def int_amdgcn_mfma_f32_4x4x4bf16_1k    : AMDGPUMfmaIntrinsic<llvm_v4f32_ty,  llvm_v4i16_ty>;
3231 def int_amdgcn_mfma_f32_32x32x8bf16_1k  : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_v4i16_ty>;
3232 def int_amdgcn_mfma_f32_16x16x16bf16_1k : AMDGPUMfmaIntrinsic<llvm_v4f32_ty,  llvm_v4i16_ty>;
3233 
3234 // Note: in gfx940 the BLGP argument is replaced by a NEG bitfield in the DGEMM
3235 //       MFMA: three bits corresponding to the neg modifier applied to the
3236 //       respective source operands.
3237 def int_amdgcn_mfma_f64_16x16x4f64      : AMDGPUMfmaIntrinsic<llvm_v4f64_ty,  llvm_double_ty>;
3238 def int_amdgcn_mfma_f64_4x4x4f64        : AMDGPUMfmaIntrinsic<llvm_double_ty, llvm_double_ty>;
3239 }
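// Illustrative IR use of the DGEMM variant above; %a, %b and %acc are
// placeholders (on gfx940 the final immediate is interpreted as the NEG
// bitfield described in the note above):
//   %d = call <4 x double> @llvm.amdgcn.mfma.f64.16x16x4f64(
//            double %a, double %b, <4 x double> %acc, i32 0, i32 0, i32 0)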
3240 
3241 //===----------------------------------------------------------------------===//
3242 // gfx940 intrinsics
3243 //===----------------------------------------------------------------------===//
3244 
3245 class AMDGPUMFp8MfmaIntrinsic<LLVMType DestTy> :
3246   AMDGPUMfmaIntrinsic<DestTy, llvm_i64_ty>;
3247 
3248 multiclass AMDGPUMFp8MfmaIntrinsic<LLVMType DestTy> {
3249   foreach kind = ["bf8_bf8", "bf8_fp8", "fp8_bf8", "fp8_fp8"] in
3250     def NAME#"_"#kind : AMDGPUMFp8MfmaIntrinsic<DestTy>;
3251 }
3252 
3253 // llvm.amdgcn.smfmac.?32.* vdst, srcA, srcB, srcC, index, cbsz, abid
3254 class AMDGPUMSmfmacIntrinsic<LLVMType DestTy, LLVMType SrcA, LLVMType SrcB> :
3255   ClangBuiltin<!subst("int", "__builtin", NAME)>,
3256   DefaultAttrsIntrinsic<[DestTy],
3257             [SrcA, SrcB, DestTy, llvm_i32_ty,
3258              llvm_i32_ty, llvm_i32_ty],
3259             [IntrConvergent, IntrNoMem,
3260              ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
3261 
3262 class AMDGPUMFp8SmfmacIntrinsic<LLVMType DestTy> :
3263   AMDGPUMSmfmacIntrinsic<DestTy, llvm_v2i32_ty, llvm_v4i32_ty>;
3264 
3265 multiclass AMDGPUMFp8SmfmacIntrinsic<LLVMType DestTy> {
3266   foreach kind = ["bf8_bf8", "bf8_fp8", "fp8_bf8", "fp8_fp8"] in
3267     def NAME#"_"#kind : AMDGPUMFp8SmfmacIntrinsic<DestTy>;
3268 }
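// Illustrative IR use of one of the sparse (smfmac) gfx940 definitions below;
// %a, %b, %acc and %idx are placeholders, and the trailing immediates are the
// cbsz and abid modifiers:
//   %d = call <4 x float> @llvm.amdgcn.smfmac.f32.16x16x32.f16(
//            <4 x half> %a, <8 x half> %b, <4 x float> %acc,
//            i32 %idx, i32 0, i32 0)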
3269 
3270 defset list<Intrinsic> AMDGPUMFMAIntrinsics940 = {
3271 def int_amdgcn_mfma_i32_16x16x32_i8     : AMDGPUMfmaIntrinsic<llvm_v4i32_ty,  llvm_i64_ty>;
3272 def int_amdgcn_mfma_i32_32x32x16_i8     : AMDGPUMfmaIntrinsic<llvm_v16i32_ty, llvm_i64_ty>;
3273 def int_amdgcn_mfma_f32_16x16x8_xf32    : AMDGPUMfmaIntrinsic<llvm_v4f32_ty,  llvm_v2f32_ty>;
3274 def int_amdgcn_mfma_f32_32x32x4_xf32    : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_v2f32_ty>;
3275 
3276 defm int_amdgcn_mfma_f32_16x16x32 : AMDGPUMFp8MfmaIntrinsic<llvm_v4f32_ty>;
3277 defm int_amdgcn_mfma_f32_32x32x16 : AMDGPUMFp8MfmaIntrinsic<llvm_v16f32_ty>;
3278 
3279 def int_amdgcn_smfmac_f32_16x16x32_f16  : AMDGPUMSmfmacIntrinsic<llvm_v4f32_ty,  llvm_v4f16_ty, llvm_v8f16_ty>;
3280 def int_amdgcn_smfmac_f32_32x32x16_f16  : AMDGPUMSmfmacIntrinsic<llvm_v16f32_ty, llvm_v4f16_ty, llvm_v8f16_ty>;
3281 def int_amdgcn_smfmac_f32_16x16x32_bf16 : AMDGPUMSmfmacIntrinsic<llvm_v4f32_ty,  llvm_v4i16_ty, llvm_v8i16_ty>;
3282 def int_amdgcn_smfmac_f32_32x32x16_bf16 : AMDGPUMSmfmacIntrinsic<llvm_v16f32_ty, llvm_v4i16_ty, llvm_v8i16_ty>;
3283 def int_amdgcn_smfmac_i32_16x16x64_i8   : AMDGPUMSmfmacIntrinsic<llvm_v4i32_ty,  llvm_v2i32_ty, llvm_v4i32_ty>;
3284 def int_amdgcn_smfmac_i32_32x32x32_i8   : AMDGPUMSmfmacIntrinsic<llvm_v16i32_ty, llvm_v2i32_ty, llvm_v4i32_ty>;
3285 
3286 defm int_amdgcn_smfmac_f32_16x16x64 : AMDGPUMFp8SmfmacIntrinsic<llvm_v4f32_ty>;
3287 defm int_amdgcn_smfmac_f32_32x32x32 : AMDGPUMFp8SmfmacIntrinsic<llvm_v16f32_ty>;
3288 }
3289 
3290 // llvm.amdgcn.cvt.f32.bf8 float vdst, int srcA, imm byte_sel [0..3]
3291 // byte_sel selects byte from srcA.
3292 def int_amdgcn_cvt_f32_bf8 : ClangBuiltin<"__builtin_amdgcn_cvt_f32_bf8">,
3293   DefaultAttrsIntrinsic<[llvm_float_ty],
3294             [llvm_i32_ty, llvm_i32_ty],
3295             [IntrNoMem, ImmArg<ArgIndex<1>>]>;
3296 
3297 // llvm.amdgcn.cvt.f32.fp8 float vdst, int srcA, imm byte_sel [0..3]
3298 def int_amdgcn_cvt_f32_fp8 : ClangBuiltin<"__builtin_amdgcn_cvt_f32_fp8">,
3299   DefaultAttrsIntrinsic<[llvm_float_ty],
3300             [llvm_i32_ty, llvm_i32_ty],
3301             [IntrNoMem, ImmArg<ArgIndex<1>>]>;
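// Illustrative IR use of the scalar converts above; %packed is a placeholder
// holding four packed 8-bit values, and the immediate selects byte 2:
//   %f = call float @llvm.amdgcn.cvt.f32.fp8(i32 %packed, i32 2)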
3302 
3303 // llvm.amdgcn.cvt.pk.f32.bf8 float2 vdst, int srcA, imm word_sel
3304 // word_sel = 1 selects 2 high bytes, 0 selects 2 low bytes.
3305 def int_amdgcn_cvt_pk_f32_bf8 : ClangBuiltin<"__builtin_amdgcn_cvt_pk_f32_bf8">,
3306   DefaultAttrsIntrinsic<[llvm_v2f32_ty],
3307             [llvm_i32_ty, llvm_i1_ty],
3308             [IntrNoMem, ImmArg<ArgIndex<1>>]>;
3309 
3310 // llvm.amdgcn.cvt.pk.f32.fp8 float2 vdst, int srcA, imm word_sel.
3311 def int_amdgcn_cvt_pk_f32_fp8 : ClangBuiltin<"__builtin_amdgcn_cvt_pk_f32_fp8">,
3312   DefaultAttrsIntrinsic<[llvm_v2f32_ty],
3313             [llvm_i32_ty, llvm_i1_ty],
3314             [IntrNoMem, ImmArg<ArgIndex<1>>]>;
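// Illustrative IR use of the packed converts above; %packed is a placeholder,
// and word_sel = false converts the two low bytes:
//   %lo = call <2 x float> @llvm.amdgcn.cvt.pk.f32.bf8(i32 %packed, i1 false)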
3315 
3316 // llvm.amdgcn.cvt.pk.bf8.f32 int vdst, float srcA, float srcB, int old, imm word_sel
3317 // word_sel = 1 selects 2 high bytes in the vdst, 0 selects 2 low bytes.
3318 def int_amdgcn_cvt_pk_bf8_f32 : ClangBuiltin<"__builtin_amdgcn_cvt_pk_bf8_f32">,
3319   DefaultAttrsIntrinsic<[llvm_i32_ty],
3320             [llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i1_ty],
3321             [IntrNoMem, ImmArg<ArgIndex<3>>]>;
3322 
3323 // llvm.amdgcn.cvt.pk.fp8.f32 int vdst, float srcA, float srcB, int old, imm word_sel
3324 def int_amdgcn_cvt_pk_fp8_f32 : ClangBuiltin<"__builtin_amdgcn_cvt_pk_fp8_f32">,
3325   DefaultAttrsIntrinsic<[llvm_i32_ty],
3326             [llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i1_ty],
3327             [IntrNoMem, ImmArg<ArgIndex<3>>]>;
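// Illustrative IR use of the packed f32-to-fp8/bf8 converts above; %a, %b and
// %old are placeholders, and word_sel = true writes the two high bytes while
// the low bytes are taken from %old:
//   %packed = call i32 @llvm.amdgcn.cvt.pk.fp8.f32(float %a, float %b,
//                                                  i32 %old, i1 true)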
3328 
3329 // llvm.amdgcn.cvt.sr.bf8.f32 int vdst, float srcA, int srcB, int old, imm byte_sel [0..3]
3330 // byte_sel selects byte to write into vdst.
3331 def int_amdgcn_cvt_sr_bf8_f32 : ClangBuiltin<"__builtin_amdgcn_cvt_sr_bf8_f32">,
3332   DefaultAttrsIntrinsic<[llvm_i32_ty],
3333             [llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
3334             [IntrNoMem, ImmArg<ArgIndex<3>>]>;
3335 
3336 // llvm.amdgcn.cvt.sr.fp8.f32 int vdst, float srcA, int srcB, int old, imm byte_sel [0..3]
3337 def int_amdgcn_cvt_sr_fp8_f32 : ClangBuiltin<"__builtin_amdgcn_cvt_sr_fp8_f32">,
3338   DefaultAttrsIntrinsic<[llvm_i32_ty],
3339             [llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
3340             [IntrNoMem, ImmArg<ArgIndex<3>>]>;
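// Illustrative IR use of the stochastic-rounding converts above; %a, %rand and
// %old are placeholders, %rand supplies the rounding bits, and byte_sel = 1
// writes byte 1 of the result while the other bytes come from %old:
//   %packed = call i32 @llvm.amdgcn.cvt.sr.fp8.f32(float %a, i32 %rand,
//                                                  i32 %old, i32 1)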
3341 
3342 //===----------------------------------------------------------------------===//
3343 // gfx950 intrinsics
3344 //===----------------------------------------------------------------------===//
3345 
3346 defset list<Intrinsic> AMDGPUMFMAIntrinsics950 = {
3347 def int_amdgcn_mfma_f32_16x16x32_f16 : AMDGPUMfmaIntrinsic<llvm_v4f32_ty, llvm_v8f16_ty>;
3348 def int_amdgcn_mfma_f32_32x32x16_f16 : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_v8f16_ty>;
3349 def int_amdgcn_mfma_i32_16x16x64_i8 : AMDGPUMfmaIntrinsic<llvm_v4i32_ty, llvm_v4i32_ty>;
3350 def int_amdgcn_mfma_i32_32x32x32_i8 : AMDGPUMfmaIntrinsic<llvm_v16i32_ty, llvm_v4i32_ty>;
3351 def int_amdgcn_mfma_f32_16x16x32_bf16 : AMDGPUMfmaIntrinsic<llvm_v4f32_ty, llvm_v8bf16_ty>;
3352 def int_amdgcn_mfma_f32_32x32x16_bf16 : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_v8bf16_ty>;
3353 def int_amdgcn_mfma_scale_f32_16x16x128_f8f6f4 : AMDGPUMfmaScaleIntrinsic<llvm_v4f32_ty>;
3354 def int_amdgcn_mfma_scale_f32_32x32x64_f8f6f4 : AMDGPUMfmaScaleIntrinsic<llvm_v16f32_ty>;
3355 def int_amdgcn_smfmac_f32_16x16x64_f16 : AMDGPUMSmfmacIntrinsic<llvm_v4f32_ty, llvm_v8f16_ty, llvm_v16f16_ty>;
3356 def int_amdgcn_smfmac_f32_32x32x32_f16 : AMDGPUMSmfmacIntrinsic<llvm_v16f32_ty, llvm_v8f16_ty, llvm_v16f16_ty>;
3357 def int_amdgcn_smfmac_f32_16x16x64_bf16 : AMDGPUMSmfmacIntrinsic<llvm_v4f32_ty, llvm_v8bf16_ty, llvm_v16bf16_ty>;
3358 def int_amdgcn_smfmac_f32_32x32x32_bf16 : AMDGPUMSmfmacIntrinsic<llvm_v16f32_ty, llvm_v8bf16_ty, llvm_v16bf16_ty>;
3359 def int_amdgcn_smfmac_i32_16x16x128_i8 : AMDGPUMSmfmacIntrinsic<llvm_v4i32_ty,  llvm_v4i32_ty, llvm_v8i32_ty>;
3360 def int_amdgcn_smfmac_i32_32x32x64_i8 : AMDGPUMSmfmacIntrinsic<llvm_v16i32_ty, llvm_v4i32_ty, llvm_v8i32_ty>;
3361 def int_amdgcn_smfmac_f32_16x16x128_bf8_bf8 : AMDGPUMSmfmacIntrinsic<llvm_v4f32_ty, llvm_v4i32_ty, llvm_v8i32_ty>;
3362 def int_amdgcn_smfmac_f32_16x16x128_bf8_fp8 : AMDGPUMSmfmacIntrinsic<llvm_v4f32_ty, llvm_v4i32_ty, llvm_v8i32_ty>;
3363 def int_amdgcn_smfmac_f32_16x16x128_fp8_bf8 : AMDGPUMSmfmacIntrinsic<llvm_v4f32_ty, llvm_v4i32_ty, llvm_v8i32_ty>;
3364 def int_amdgcn_smfmac_f32_16x16x128_fp8_fp8 : AMDGPUMSmfmacIntrinsic<llvm_v4f32_ty, llvm_v4i32_ty, llvm_v8i32_ty>;
3365 def int_amdgcn_smfmac_f32_32x32x64_bf8_bf8 : AMDGPUMSmfmacIntrinsic<llvm_v16f32_ty, llvm_v4i32_ty, llvm_v8i32_ty>;
3366 def int_amdgcn_smfmac_f32_32x32x64_bf8_fp8 : AMDGPUMSmfmacIntrinsic<llvm_v16f32_ty, llvm_v4i32_ty, llvm_v8i32_ty>;
3367 def int_amdgcn_smfmac_f32_32x32x64_fp8_bf8 : AMDGPUMSmfmacIntrinsic<llvm_v16f32_ty, llvm_v4i32_ty, llvm_v8i32_ty>;
3368 def int_amdgcn_smfmac_f32_32x32x64_fp8_fp8 : AMDGPUMSmfmacIntrinsic<llvm_v16f32_ty, llvm_v4i32_ty, llvm_v8i32_ty>;
3369 }
3370 
3371 // { vdst_new, vsrc_new } llvm.amdgcn.permlane16.swap <vdst_old> <vsrc_old> <fi> <bound_control>
3372 def int_amdgcn_permlane16_swap :
3373   Intrinsic<[llvm_i32_ty, llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
3374                                          llvm_i1_ty, llvm_i1_ty],
3375             [IntrNoMem, IntrConvergent, IntrWillReturn,
3376              ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, IntrNoCallback, IntrNoFree]>;
3377 
3378 // { vdst_new, vsrc_new } llvm.amdgcn.permlane32.swap <vdst_old> <vsrc_old> <fi> <bound_control>
3379 def int_amdgcn_permlane32_swap :
3380   Intrinsic<[llvm_i32_ty, llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
3381                                          llvm_i1_ty, llvm_i1_ty],
3382             [IntrNoMem, IntrConvergent, IntrWillReturn,
3383              ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, IntrNoCallback, IntrNoFree]>;
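// Illustrative IR use; %vdst_old and %vsrc_old are placeholders, and the two
// results come back as an aggregate:
//   %pair = call { i32, i32 } @llvm.amdgcn.permlane16.swap(
//               i32 %vdst_old, i32 %vsrc_old, i1 false, i1 false)
//   %vdst_new = extractvalue { i32, i32 } %pair, 0
//   %vsrc_new = extractvalue { i32, i32 } %pair, 1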
3384 
3385 // llvm.amdgcn.ashr_pk_i8_i32 int vdst, int src0, int src1, int src2
3386 def int_amdgcn_ashr_pk_i8_i32 : ClangBuiltin<"__builtin_amdgcn_ashr_pk_i8_i32">,
3387   DefaultAttrsIntrinsic<[llvm_i16_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
3388             [IntrNoMem, IntrSpeculatable]>;
3389 
3390 // llvm.amdgcn.ashr_pk_u8_i32 int vdst, int src0, int src1, int src2
3391 def int_amdgcn_ashr_pk_u8_i32 : ClangBuiltin<"__builtin_amdgcn_ashr_pk_u8_i32">,
3392   DefaultAttrsIntrinsic<[llvm_i16_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
3393             [IntrNoMem, IntrSpeculatable]>;
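// Illustrative IR use of the packed shifts above; %src0, %src1 and %shift are
// placeholders, and the two resulting 8-bit values are packed into the
// returned i16:
//   %packed = call i16 @llvm.amdgcn.ashr.pk.i8.i32(i32 %src0, i32 %src1,
//                                                  i32 %shift)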
3394 
3395 //===----------------------------------------------------------------------===//
3396 // Special intrinsics for backend-internal use only. No frontend
3397 // should emit calls to these.
3398 //===----------------------------------------------------------------------===//
3399 //
3400 // Control-flow intrinsics in LLVM IR are conceptually convergent because they
3401 // represent the wave CFG, i.e., sets of threads that are "converged" or
3402 // "execute in lock-step". But they exist only during a small window in the
3403 // lowering process, inserted after the structurizer and then translated to
3404 // equivalent MIR pseudos. So rather than create convergence tokens for these
3405 // builtins, we simply mark them as not convergent.
3406 //
3407 // This is really a workaround to allow control flow lowering in the presence of
3408 // convergence control tokens. The corresponding MIR pseudos are marked as
3409 // having side effects, which is sufficient to prevent optimizations without
3410 // having to mark them as convergent.
3411 def int_amdgcn_if : Intrinsic<[llvm_i1_ty, llvm_anyint_ty],
3412   [llvm_i1_ty], [IntrWillReturn, IntrNoCallback, IntrNoFree]
3413 >;
3414 
3415 def int_amdgcn_else : Intrinsic<[llvm_i1_ty, llvm_anyint_ty],
3416   [llvm_anyint_ty], [IntrWillReturn, IntrNoCallback, IntrNoFree]
3417 >;
3418 
3419 def int_amdgcn_if_break : Intrinsic<[llvm_anyint_ty],
3420   [llvm_i1_ty, LLVMMatchType<0>],
3421   [IntrNoMem, IntrWillReturn, IntrNoCallback, IntrNoFree]
3422 >;
3423 
3424 def int_amdgcn_loop : Intrinsic<[llvm_i1_ty],
3425   [llvm_anyint_ty], [IntrWillReturn, IntrNoCallback, IntrNoFree]
3426 >;
3427 
3428 def int_amdgcn_end_cf : Intrinsic<[], [llvm_anyint_ty],
3429   [IntrWillReturn, IntrNoCallback, IntrNoFree]>;
3430 
3431 // Represent unreachable in a divergent region.
3432 def int_amdgcn_unreachable : Intrinsic<[], [], [IntrConvergent, IntrNoCallback, IntrNoFree]>;
3433 
3434 // Emit a 2.5 ulp division with no denormal support. Should only be inserted
3435 // by a pass based on !fpmath metadata.
3436 def int_amdgcn_fdiv_fast : DefaultAttrsIntrinsic<
3437   [llvm_float_ty], [llvm_float_ty, llvm_float_ty],
3438   [IntrNoMem, IntrSpeculatable]
3439 >;
3440 
3441 /// Emit an addrspacecast without null pointer checking.
3442 /// Should only be inserted by a pass based on analysis of an addrspacecast's src.
3443 def int_amdgcn_addrspacecast_nonnull : DefaultAttrsIntrinsic<
3444   [llvm_anyptr_ty], [llvm_anyptr_ty],
3445   [IntrNoMem, IntrSpeculatable]
3446 >;
3447 }