Warning, /include/llvm/IR/IntrinsicsRISCV.td is written in an unsupported language. File is not indexed.
0001 //===- IntrinsicsRISCV.td - Defines RISCV intrinsics -------*- tablegen -*-===//
0002 //
0003 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
0004 // See https://llvm.org/LICENSE.txt for license information.
0005 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
0006 //
0007 //===----------------------------------------------------------------------===//
0008 //
0009 // This file defines all of the RISCV-specific intrinsics.
0010 //
0011 //===----------------------------------------------------------------------===//
0012
0013 //===----------------------------------------------------------------------===//
0014 // Atomics
0015
0016 // Atomic Intrinsics have multiple versions for different access widths, which
0017 // all follow one of the following signatures (depending on how many arguments
0018 // they require). We carefully instantiate only specific versions of these for
0019 // specific integer widths, rather than using `llvm_anyint_ty`.
0020 //
0021 // In fact, as these intrinsics take `llvm_anyptr_ty`, the given names are the
0022 // canonical names, and the intrinsics used in the code will have a name
0023 // suffixed with the pointer type they are specialised for (denoted `<p>` in the
0024 // names below), in order to avoid type conflicts.
0025
0026 let TargetPrefix = "riscv" in {
0027
0028 // T @llvm.<name>.T.<p>(any*, T, T, T imm);
0029 class MaskedAtomicRMWFourArg<LLVMType itype>
0030 : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype],
0031 [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<3>>]>;
0032 // T @llvm.<name>.T.<p>(any*, T, T, T, T imm);
0033 class MaskedAtomicRMWFiveArg<LLVMType itype>
0034 : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype, itype],
0035 [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<4>>]>;
0036
0037 // We define 32-bit and 64-bit variants of the above, where T stands for i32
0038 // or i64 respectively:
0039 multiclass MaskedAtomicRMWFourArgIntrinsics {
0040 // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32 imm);
0041 def _i32 : MaskedAtomicRMWFourArg<llvm_i32_ty>;
0042 // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64 imm);
0043 def _i64 : MaskedAtomicRMWFourArg<llvm_i64_ty>;
0044 }
0045
0046 multiclass MaskedAtomicRMWFiveArgIntrinsics {
0047 // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32, i32 imm);
0048 def _i32 : MaskedAtomicRMWFiveArg<llvm_i32_ty>;
0049 // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64, i64 imm);
0050 def _i64 : MaskedAtomicRMWFiveArg<llvm_i64_ty>;
0051 }
0052
0053 // These intrinsics are intended only for internal compiler use (i.e. as
0054 // part of the AtomicExpand pass via the emitMaskedAtomic*Intrinsic hooks). Their
0055 // names and semantics could change in the future.
0056
0057 // @llvm.riscv.masked.atomicrmw.*.{i32,i64}.<p>(
0058 // ptr addr, ixlen oparg, ixlen mask, ixlenimm ordering)
0059 defm int_riscv_masked_atomicrmw_xchg : MaskedAtomicRMWFourArgIntrinsics;
0060 defm int_riscv_masked_atomicrmw_add : MaskedAtomicRMWFourArgIntrinsics;
0061 defm int_riscv_masked_atomicrmw_sub : MaskedAtomicRMWFourArgIntrinsics;
0062 defm int_riscv_masked_atomicrmw_nand : MaskedAtomicRMWFourArgIntrinsics;
0063 defm int_riscv_masked_atomicrmw_umax : MaskedAtomicRMWFourArgIntrinsics;
0064 defm int_riscv_masked_atomicrmw_umin : MaskedAtomicRMWFourArgIntrinsics;
0065 // Signed min and max need an extra operand to do sign extension with.
0066 // @llvm.riscv.masked.atomicrmw.{max,min}.{i32,i64}.<p>(
0067 // ptr addr, ixlen oparg, ixlen mask, ixlen shamt, ixlenimm ordering)
0068 defm int_riscv_masked_atomicrmw_max : MaskedAtomicRMWFiveArgIntrinsics;
0069 defm int_riscv_masked_atomicrmw_min : MaskedAtomicRMWFiveArgIntrinsics;
0070
0071 // @llvm.riscv.masked.cmpxchg.{i32,i64}.<p>(
0072 // ptr addr, ixlen cmpval, ixlen newval, ixlen mask, ixlenimm ordering)
0073 defm int_riscv_masked_cmpxchg : MaskedAtomicRMWFiveArgIntrinsics;
0074
0075 } // TargetPrefix = "riscv"
0076
0077 //===----------------------------------------------------------------------===//
0078 // Bitmanip (Bit Manipulation) Extension
0079
0080 let TargetPrefix = "riscv" in {
0081
0082 class BitManipGPRIntrinsics
0083 : DefaultAttrsIntrinsic<[llvm_any_ty],
0084 [LLVMMatchType<0>],
0085 [IntrNoMem, IntrSpeculatable]>;
0086 class BitManipGPRGPRIntrinsics
0087 : DefaultAttrsIntrinsic<[llvm_any_ty],
0088 [LLVMMatchType<0>, LLVMMatchType<0>],
0089 [IntrNoMem, IntrSpeculatable]>;
0090
0091 // Zbb (basic bit-manipulation)
0092 def int_riscv_orc_b : BitManipGPRIntrinsics;
0093
0094 // Zbc or Zbkc (carry-less multiplication)
0095 def int_riscv_clmul : BitManipGPRGPRIntrinsics;
0096 def int_riscv_clmulh : BitManipGPRGPRIntrinsics;
0097
0098 // Zbc only
0099 def int_riscv_clmulr : BitManipGPRGPRIntrinsics;
0100
0101 // Zbkb (bit-manipulation for cryptography)
0102 def int_riscv_brev8 : BitManipGPRIntrinsics;
0103 def int_riscv_zip : BitManipGPRIntrinsics;
0104 def int_riscv_unzip : BitManipGPRIntrinsics;
0105
0106 // Zbkx (crossbar permutations)
0107 def int_riscv_xperm4 : BitManipGPRGPRIntrinsics;
0108 def int_riscv_xperm8 : BitManipGPRGPRIntrinsics;
0109 } // TargetPrefix = "riscv"
0110
0111 //===----------------------------------------------------------------------===//
0112 // May-Be-Operations
0113
0114 let TargetPrefix = "riscv" in {
0115
0116 // Zimop; the trailing mop-number operand must be an immediate (ImmArg).
0117 def int_riscv_mopr
0118 : DefaultAttrsIntrinsic<[llvm_any_ty],
0119 [LLVMMatchType<0>, LLVMMatchType<0>],
0120 [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<1>>]>;
0121 def int_riscv_moprr
0122 : DefaultAttrsIntrinsic<[llvm_any_ty],
0123 [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
0124 [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<2>>]>;
0125 } // TargetPrefix = "riscv"
0126
0127 //===----------------------------------------------------------------------===//
0128 // Vectors
0129
0130 // The intrinsic does not have any operand that must be extended.
0131 defvar NoScalarOperand = 0xF;
0132
0133 // The intrinsic does not have a VL operand.
0134 // (e.g., riscv_vmv_x_s and riscv_vfmv_f_s)
0135 defvar NoVLOperand = 0x1F;
0136
0137 class RISCVVIntrinsic {
0138 // These intrinsics may accept illegal integer values in their llvm_any_ty
0139 // operand (the one flagged by ScalarOperand), so they have to be extended.
0140 Intrinsic IntrinsicID = !cast<Intrinsic>(NAME);
0141 bits<4> ScalarOperand = NoScalarOperand;
0142 bits<5> VLOperand = NoVLOperand;
0143 }
0144
0145 let TargetPrefix = "riscv" in {
0146 // We use anyint here but we only support XLen; VSEW and VLMUL are immediates.
0147 def int_riscv_vsetvli : DefaultAttrsIntrinsic<[llvm_anyint_ty],
0148 /* AVL */ [LLVMMatchType<0>,
0149 /* VSEW */ LLVMMatchType<0>,
0150 /* VLMUL */ LLVMMatchType<0>],
0151 [IntrNoMem,
0152 ImmArg<ArgIndex<1>>,
0153 ImmArg<ArgIndex<2>>]>;
0154 def int_riscv_vsetvlimax : DefaultAttrsIntrinsic<[llvm_anyint_ty],
0155 /* VSEW */ [LLVMMatchType<0>,
0156 /* VLMUL */ LLVMMatchType<0>],
0157 [IntrNoMem,
0158 ImmArg<ArgIndex<0>>,
0159 ImmArg<ArgIndex<1>>]>;
0160
0161 // For unit stride mask load
0162 // Input: (pointer, vl)
0163 class RISCVUSMLoad
0164 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0165 [llvm_ptr_ty, llvm_anyint_ty],
0166 [NoCapture<ArgIndex<0>>, IntrReadMem, IntrArgMemOnly]>,
0167 RISCVVIntrinsic {
0168 let VLOperand = 1;
0169 }
0170 // For unit stride load
0171 // Input: (passthru, pointer, vl)
0172 class RISCVUSLoad
0173 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0174 [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyint_ty],
0175 [NoCapture<ArgIndex<1>>, IntrReadMem, IntrArgMemOnly]>,
0176 RISCVVIntrinsic {
0177 let VLOperand = 2;
0178 }
0179 // For unit stride fault-only-first load
0180 // Input: (passthru, pointer, vl)
0181 // Output: (data, vl)
0182 // NOTE: We model this with default memory properties since we model writing
0183 // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
0184 class RISCVUSLoadFF
0185 : DefaultAttrsIntrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
0186 [LLVMMatchType<0>, llvm_ptr_ty, LLVMMatchType<1>],
0187 [NoCapture<ArgIndex<1>>]>,
0188 RISCVVIntrinsic {
0189 let VLOperand = 2;
0190 }
0191 // For unit stride load with mask
0192 // Input: (maskedoff, pointer, mask, vl, policy); policy must be an immediate
0193 class RISCVUSLoadMasked
0194 : DefaultAttrsIntrinsic<[llvm_anyvector_ty ],
0195 [LLVMMatchType<0>, llvm_ptr_ty,
0196 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
0197 llvm_anyint_ty, LLVMMatchType<1>],
0198 [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>, IntrReadMem,
0199 IntrArgMemOnly]>,
0200 RISCVVIntrinsic {
0201 let VLOperand = 3;
0202 }
0203 // For unit stride fault-only-first load with mask
0204 // Input: (maskedoff, pointer, mask, vl, policy); policy must be an immediate
0205 // Output: (data, vl)
0206 // NOTE: We model this with default memory properties since we model writing
0207 // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
0208 class RISCVUSLoadFFMasked
0209 : DefaultAttrsIntrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
0210 [LLVMMatchType<0>, llvm_ptr_ty,
0211 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
0212 LLVMMatchType<1>, LLVMMatchType<1>],
0213 [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>]>, RISCVVIntrinsic {
0214 let VLOperand = 3;
0215 }
0216 // For strided load with passthru operand
0217 // Input: (passthru, pointer, stride, vl)
0218 class RISCVSLoad
0219 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0220 [LLVMMatchType<0>, llvm_ptr_ty,
0221 llvm_anyint_ty, LLVMMatchType<1>],
0222 [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
0223 let VLOperand = 3;
0224 }
0225 // For strided load with mask
0226 // Input: (maskedoff, pointer, stride, mask, vl, policy)
0227 class RISCVSLoadMasked
0228 : DefaultAttrsIntrinsic<[llvm_anyvector_ty ],
0229 [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyint_ty,
0230 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>,
0231 LLVMMatchType<1>],
0232 [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>,
0233 RISCVVIntrinsic {
0234 let VLOperand = 4;
0235 }
0236 // For indexed load with passthru operand
0237 // Input: (passthru, pointer, index, vl)
0238 class RISCVILoad
0239 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0240 [LLVMMatchType<0>, llvm_ptr_ty,
0241 llvm_anyvector_ty, llvm_anyint_ty],
0242 [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
0243 let VLOperand = 3;
0244 }
0245 // For indexed load with mask
0246 // Input: (maskedoff, pointer, index, mask, vl, policy)
0247 class RISCVILoadMasked
0248 : DefaultAttrsIntrinsic<[llvm_anyvector_ty ],
0249 [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyvector_ty,
0250 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
0251 LLVMMatchType<2>],
0252 [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>,
0253 RISCVVIntrinsic {
0254 let VLOperand = 4;
0255 }
0256 // For unit stride store
0257 // Input: (vector_in, pointer, vl)
0258 class RISCVUSStore
0259 : DefaultAttrsIntrinsic<[],
0260 [llvm_anyvector_ty, llvm_ptr_ty, llvm_anyint_ty],
0261 [NoCapture<ArgIndex<1>>, IntrWriteMem, IntrArgMemOnly]>,
0262 RISCVVIntrinsic {
0263 let VLOperand = 2;
0264 }
0265 // For unit stride store with mask
0266 // Input: (vector_in, pointer, mask, vl)
0267 class RISCVUSStoreMasked
0268 : DefaultAttrsIntrinsic<[],
0269 [llvm_anyvector_ty, llvm_ptr_ty,
0270 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
0271 llvm_anyint_ty],
0272 [NoCapture<ArgIndex<1>>, IntrWriteMem, IntrArgMemOnly]>,
0273 RISCVVIntrinsic {
0274 let VLOperand = 3;
0275 }
0276 // For strided store
0277 // Input: (vector_in, pointer, stride, vl)
0278 class RISCVSStore
0279 : DefaultAttrsIntrinsic<[],
0280 [llvm_anyvector_ty, llvm_ptr_ty,
0281 llvm_anyint_ty, LLVMMatchType<1>],
0282 [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
0283 let VLOperand = 3;
0284 }
0285 // For strided store with mask
0286 // Input: (vector_in, pointer, stride, mask, vl)
0287 class RISCVSStoreMasked
0288 : DefaultAttrsIntrinsic<[],
0289 [llvm_anyvector_ty, llvm_ptr_ty, llvm_anyint_ty,
0290 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
0291 [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
0292 let VLOperand = 4;
0293 }
0294 // For indexed store
0295 // Input: (vector_in, pointer, index, vl)
0296 class RISCVIStore
0297 : DefaultAttrsIntrinsic<[],
0298 [llvm_anyvector_ty, llvm_ptr_ty,
0299 llvm_anyint_ty, llvm_anyint_ty],
0300 [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
0301 let VLOperand = 3;
0302 }
0303 // For indexed store with mask
0304 // Input: (vector_in, pointer, index, mask, vl)
0305 class RISCVIStoreMasked
0306 : DefaultAttrsIntrinsic<[],
0307 [llvm_anyvector_ty, llvm_ptr_ty, llvm_anyvector_ty,
0308 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
0309 [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
0310 let VLOperand = 4;
0311 }
0312 // For destination vector type is the same as source vector.
0313 // Input: (passthru, vector_in, vl)
0314 class RISCVUnaryAAUnMasked
0315 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0316 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
0317 [IntrNoMem]>, RISCVVIntrinsic {
0318 let VLOperand = 2;
0319 }
0320 // For destination vector type is the same as the source vector type
0321 // Input: (passthru, vector_in, vl, policy); policy must be an immediate
0322 class RISCVUnaryAAUnMaskedZvk<bit IsVS>
0323 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0324 [LLVMMatchType<0>, !if(IsVS, llvm_anyvector_ty, LLVMMatchType<0>),
0325 llvm_anyint_ty, !if(IsVS, LLVMMatchType<2>, LLVMMatchType<1>)],
0326 [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
0327 let VLOperand = 2;
0328 }
0329
0330 multiclass RISCVUnaryAAUnMaskedZvk<bit HasVV = 1, bit HasVS = 1> {
0331 if HasVV then
0332 def "int_riscv_" # NAME # "_vv" : RISCVUnaryAAUnMaskedZvk<IsVS=0>;
0333
0334 if HasVS then
0335 def "int_riscv_" # NAME # "_vs" : RISCVUnaryAAUnMaskedZvk<IsVS=1>;
0336 }
0337 // For destination vector type is the same as first source vector (with mask).
0338 // Input: (vector_in, vector_in, mask, vl, policy)
0339 class RISCVUnaryAAMasked
0340 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0341 [LLVMMatchType<0>, LLVMMatchType<0>,
0342 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
0343 LLVMMatchType<1>],
0344 [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
0345 let VLOperand = 3;
0346 }
0347 // For destination vector type is the same as source vector.
0348 // Input: (passthru, vector_in, frm, vl)
0349 class RISCVUnaryAAUnMaskedRoundingMode
0350 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0351 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, LLVMMatchType<1>],
0352 [ImmArg<ArgIndex<2>>, IntrNoMem]>, RISCVVIntrinsic {
0353 let VLOperand = 3;
0354 }
0355 // For destination vector type is the same as first source vector (with mask).
0356 // Input: (vector_in, vector_in, mask, frm, vl, policy)
0357 class RISCVUnaryAAMaskedRoundingMode
0358 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0359 [LLVMMatchType<0>, LLVMMatchType<0>,
0360 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
0361 LLVMMatchType<1>, LLVMMatchType<1>],
0362 [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
0363 let VLOperand = 4;
0364 }
0365 // Input: (passthru, vector_in, mask, vl)
0366 class RISCVCompress
0367 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0368 [LLVMMatchType<0>, LLVMMatchType<0>,
0369 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
0370 [IntrNoMem]>, RISCVVIntrinsic {
0371 let VLOperand = 3;
0372 }
0373 // For destination vector type is the same as first and second source vector.
0374 // Input: (vector_in, vector_in, vl)
0375 class RISCVBinaryAAAUnMasked
0376 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0377 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
0378 [IntrNoMem]>, RISCVVIntrinsic {
0379 let VLOperand = 2;
0380 }
0381 // For destination vector type is the same as first and second source vector.
0382 // Input: (passthru, vector_in, int_vector_in, vl)
0383 class RISCVRGatherVVUnMasked
0384 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0385 [LLVMMatchType<0>, LLVMMatchType<0>,
0386 LLVMVectorOfBitcastsToInt<0>, llvm_anyint_ty],
0387 [IntrNoMem]>, RISCVVIntrinsic {
0388 let VLOperand = 3;
0389 }
0390 // For destination vector type is the same as first and second source vector.
0391 // Input: (vector_in, vector_in, int_vector_in, mask, vl, policy)
0392 class RISCVRGatherVVMasked
0393 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0394 [LLVMMatchType<0>, LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>,
0395 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
0396 LLVMMatchType<1>],
0397 [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
0398 let VLOperand = 4;
0399 }
0400 // Input: (passthru, vector_in, int16_vector_in, vl)
0401 class RISCVRGatherEI16VVUnMasked
0402 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0403 [LLVMMatchType<0>, LLVMMatchType<0>,
0404 LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
0405 llvm_anyint_ty],
0406 [IntrNoMem]>, RISCVVIntrinsic {
0407 let VLOperand = 3;
0408 }
0409 // For destination vector type is the same as first and second source vector.
0410 // Input: (vector_in, vector_in, int16_vector_in, mask, vl, policy)
0411 class RISCVRGatherEI16VVMasked
0412 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0413 [LLVMMatchType<0>, LLVMMatchType<0>,
0414 LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
0415 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
0416 LLVMMatchType<1>],
0417 [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
0418 let VLOperand = 4;
0419 }
0420 // For destination vector type is the same as first source vector, and the
0421 // second operand is XLen.
0422 // Input: (passthru, vector_in, xlen_in, vl)
0423 class RISCVGatherVXUnMasked
0424 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0425 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
0426 LLVMMatchType<1>],
0427 [IntrNoMem]>, RISCVVIntrinsic {
0428 let VLOperand = 3;
0429 }
0430 // For destination vector type is the same as first source vector (with mask).
0431 // Second operand is XLen.
0432 // Input: (maskedoff, vector_in, xlen_in, mask, vl, policy)
0433 class RISCVGatherVXMasked
0434 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0435 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
0436 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>,
0437 LLVMMatchType<1>],
0438 [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
0439 let VLOperand = 4;
0440 }
0441 // For destination vector type is the same as first source vector.
0442 // Input: (passthru, vector_in, vector_in/scalar_in, vl)
0443 class RISCVBinaryAAXUnMasked<bit IsVI = 0>
0444 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0445 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
0446 llvm_anyint_ty],
0447 !listconcat([IntrNoMem],
0448 !if(IsVI, [ImmArg<ArgIndex<2>>], []))>, // vi form: operand 2 is an immediate
0449 RISCVVIntrinsic {
0450 let ScalarOperand = 2;
0451 let VLOperand = 3;
0452 }
0453 // For destination vector type is the same as the source vector type.
0454 // Input: (passthru, vector_in, vector_in/scalar_in, vl, policy)
0455 class RISCVBinaryAAXUnMaskedZvk<bit IsVI = 0>
0456 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0457 [LLVMMatchType<0>, LLVMMatchType<0>,
0458 llvm_any_ty, llvm_anyint_ty, LLVMMatchType<2>],
0459 !listconcat([ImmArg<ArgIndex<4>>, IntrNoMem],
0460 !if(IsVI, [ImmArg<ArgIndex<2>>], []))>, // vi form: operand 2 is an immediate
0461 RISCVVIntrinsic {
0462 let ScalarOperand = 2;
0463 let VLOperand = 3;
0464 }
0465 // For destination vector type is the same as first source vector (with mask).
0466 // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
0467 class RISCVBinaryAAXMasked
0468 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0469 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
0470 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
0471 LLVMMatchType<2>],
0472 [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
0473 let ScalarOperand = 2;
0474 let VLOperand = 4;
0475 }
0476 // For destination vector type is the same as first source vector.
0477 // Input: (passthru, vector_in, vector_in/scalar_in, frm, vl)
0478 class RISCVBinaryAAXUnMaskedRoundingMode
0479 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0480 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
0481 llvm_anyint_ty, LLVMMatchType<2>],
0482 [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
0483 let ScalarOperand = 2;
0484 let VLOperand = 4;
0485 }
0486 // For destination vector type is the same as first source vector (with mask).
0487 // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
0488 class RISCVBinaryAAXMaskedRoundingMode
0489 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0490 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
0491 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
0492 LLVMMatchType<2>, LLVMMatchType<2>],
0493 [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, RISCVVIntrinsic {
0494 let ScalarOperand = 2;
0495 let VLOperand = 5;
0496 }
0497 // For destination vector type is the same as first source vector. The
0498 // second source operand must match the destination type or be an XLen scalar.
0499 // Input: (passthru, vector_in, vector_in/scalar_in, vl)
0500 class RISCVBinaryAAShiftUnMasked
0501 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0502 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
0503 llvm_anyint_ty],
0504 [IntrNoMem]>, RISCVVIntrinsic {
0505 let VLOperand = 3;
0506 }
0507 // For destination vector type is the same as first source vector (with mask).
0508 // The second source operand must match the destination type or be an XLen scalar.
0509 // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
0510 class RISCVBinaryAAShiftMasked
0511 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0512 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
0513 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
0514 LLVMMatchType<2>],
0515 [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
0516 let VLOperand = 4;
0517 }
0518 // For destination vector type is NOT the same as first source vector.
0519 // Input: (passthru, vector_in, vector_in/scalar_in, vl)
0520 class RISCVBinaryABXUnMasked
0521 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0522 [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
0523 llvm_anyint_ty],
0524 [IntrNoMem]>, RISCVVIntrinsic {
0525 let ScalarOperand = 2; // operand 2 may be a scalar needing extension
0526 let VLOperand = 3;
0527 }
0528 // For destination vector type is NOT the same as first source vector (with mask).
0529 // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
0530 class RISCVBinaryABXMasked
0531 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0532 [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
0533 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
0534 LLVMMatchType<3>],
0535 [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
0536 let ScalarOperand = 2;
0537 let VLOperand = 4;
0538 }
0539 // For destination vector type is NOT the same as first source vector.
0540 // Input: (passthru, vector_in, vector_in/scalar_in, frm, vl)
0541 class RISCVBinaryABXUnMaskedRoundingMode
0542 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0543 [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
0544 llvm_anyint_ty, LLVMMatchType<3>],
0545 [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
0546 let ScalarOperand = 2;
0547 let VLOperand = 4;
0548 }
0549 // For destination vector type is NOT the same as first source vector (with mask).
0550 // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
0551 class RISCVBinaryABXMaskedRoundingMode
0552 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0553 [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
0554 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
0555 LLVMMatchType<3>, LLVMMatchType<3>],
0556 [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, RISCVVIntrinsic {
0557 let ScalarOperand = 2;
0558 let VLOperand = 5;
0559 }
0560 // For destination vector type is NOT the same as first source vector. The
0561 // second source operand must match the destination type or be an XLen scalar.
0562 // Input: (passthru, vector_in, vector_in/scalar_in, vl)
0563 class RISCVBinaryABShiftUnMasked
0564 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0565 [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
0566 llvm_anyint_ty],
0567 [IntrNoMem]>, RISCVVIntrinsic {
0568 let VLOperand = 3;
0569 }
0570 // For destination vector type is NOT the same as first source vector (with mask).
0571 // The second source operand must match the destination type or be an XLen scalar.
0572 // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
0573 class RISCVBinaryABShiftMasked
0574 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0575 [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
0576 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
0577 LLVMMatchType<3>],
0578 [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
0579 let VLOperand = 4;
0580 }
0581 // For binary operations with V0 as input (V0 is passed as the i1 mask vector operand).
0582 // Input: (passthru, vector_in, vector_in/scalar_in, V0, vl)
0583 class RISCVBinaryWithV0
0584 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0585 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
0586 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
0587 llvm_anyint_ty],
0588 [IntrNoMem]>, RISCVVIntrinsic {
0589 let ScalarOperand = 2;
0590 let VLOperand = 4;
0591 }
0592 // For binary operations with mask type output and V0 as input.
0593 // Output: (mask type output)
0594 // Input: (vector_in, vector_in/scalar_in, V0, vl)
0595 class RISCVBinaryMOutWithV0
0596 :DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
0597 [llvm_anyvector_ty, llvm_any_ty,
0598 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
0599 llvm_anyint_ty],
0600 [IntrNoMem]>, RISCVVIntrinsic {
0601 let ScalarOperand = 1;
0602 let VLOperand = 3;
0603 }
0604 // For binary operations with mask type output.
0605 // Output: (mask type output)
0606 // Input: (vector_in, vector_in/scalar_in, vl)
0607 class RISCVBinaryMOut
0608 : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
0609 [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
0610 [IntrNoMem]>, RISCVVIntrinsic {
0611 let ScalarOperand = 1;
0612 let VLOperand = 2;
0613 }
0614 // For binary operations with mask type output without mask.
0615 // Output: (mask type output)
0616 // Input: (vector_in, vector_in/scalar_in, vl)
0617 class RISCVCompareUnMasked
0618 : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
0619 [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
0620 [IntrNoMem]>, RISCVVIntrinsic {
0621 let ScalarOperand = 1;
0622 let VLOperand = 2;
0623 }
0624 // For binary operations with mask type output with mask.
0625 // Output: (mask type output)
0626 // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
0627 class RISCVCompareMasked
0628 : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
0629 [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
0630 llvm_anyvector_ty, llvm_any_ty,
0631 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
0632 [IntrNoMem]>, RISCVVIntrinsic {
0633 let ScalarOperand = 2;
0634 let VLOperand = 4;
0635 }
0636 // For FP classify operations.
0637 // Output: (bit mask type output)
0638 // Input: (passthru, vector_in, vl)
0639 class RISCVClassifyUnMasked
0640 : DefaultAttrsIntrinsic<[LLVMVectorOfBitcastsToInt<0>],
0641 [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
0642 llvm_anyint_ty],
0643 [IntrNoMem]>, RISCVVIntrinsic {
0644 let VLOperand = 2; // vl (llvm_anyint_ty) is operand 2: passthru = 0, vector_in = 1
0645 }
0646 // For FP classify operations with mask.
0647 // Output: (bit mask type output)
0648 // Input: (maskedoff, vector_in, mask, vl, policy); policy must be an immediate
0649 class RISCVClassifyMasked
0650 : DefaultAttrsIntrinsic<[LLVMVectorOfBitcastsToInt<0>],
0651 [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
0652 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
0653 llvm_anyint_ty, LLVMMatchType<1>],
0654 [IntrNoMem, ImmArg<ArgIndex<4>>]>, RISCVVIntrinsic {
0655 let VLOperand = 3;
0656 }
0657 // For Saturating binary operations.
0658 // The destination vector type is the same as first source vector.
0659 // Input: (passthru, vector_in, vector_in/scalar_in, vl)
0660 class RISCVSaturatingBinaryAAXUnMasked
0661 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0662 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
0663 llvm_anyint_ty],
0664 [IntrNoMem]>, RISCVVIntrinsic {
0665 let ScalarOperand = 2;
0666 let VLOperand = 3;
0667 }
0668 // For Saturating binary operations with rounding-mode operand (vxrm immediate)
0669 // The destination vector type is the same as first source vector.
0670 // Input: (passthru, vector_in, vector_in/scalar_in, vxrm, vl)
0671 class RISCVSaturatingBinaryAAXUnMaskedRoundingMode
0672 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0673 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
0674 llvm_anyint_ty, LLVMMatchType<2>],
0675 [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
0676 let ScalarOperand = 2;
0677 let VLOperand = 4;
0678 }
0679 // For Saturating binary operations with mask.
0680 // The destination vector type is the same as first source vector.
0681 // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
0682 class RISCVSaturatingBinaryAAXMasked
0683 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0684 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
0685 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
0686 LLVMMatchType<2>],
0687 [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
0688 let ScalarOperand = 2;
0689 let VLOperand = 4;
0690 }
0691 // For Saturating binary operations with mask and rounding-mode operand
0692 // The destination vector type is the same as first source vector.
0693 // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
0694 class RISCVSaturatingBinaryAAXMaskedRoundingMode
0695 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0696 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
0697 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
0698 LLVMMatchType<2>, LLVMMatchType<2>],
0699 [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, RISCVVIntrinsic {
0700 let ScalarOperand = 2;
0701 let VLOperand = 5;
0702 }
0703 // For Saturating binary operations.
0704 // The destination vector type is the same as first source vector.
0705 // The second source operand matches the destination type or is an XLen scalar.
0706 // Input: (passthru, vector_in, vector_in/scalar_in, vxrm, vl)
0707 class RISCVSaturatingBinaryAAShiftUnMaskedRoundingMode
0708 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0709 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
0710 llvm_anyint_ty, LLVMMatchType<2>],
0711 [ImmArg<ArgIndex<3>>, IntrNoMem]>,
0712 RISCVVIntrinsic {
0713 let VLOperand = 4;
0714 }
0715 // For Saturating binary operations with mask.
0716 // The destination vector type is the same as first source vector.
0717 // The second source operand matches the destination type or is an XLen scalar.
0718 // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
0719 class RISCVSaturatingBinaryAAShiftMaskedRoundingMode
0720 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0721 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
0722 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
0723 LLVMMatchType<2>, LLVMMatchType<2>],
0724 [ImmArg<ArgIndex<4>>,ImmArg<ArgIndex<6>>, IntrNoMem]>,
0725 RISCVVIntrinsic {
0726 let VLOperand = 5;
0727 }
0728 // For Saturating binary operations.
0729 // The destination vector type is NOT the same as first source vector.
0730 // The second source operand matches the destination type or is an XLen scalar.
0731 // Input: (passthru, vector_in, vector_in/scalar_in, vxrm, vl)
0732 class RISCVSaturatingBinaryABShiftUnMaskedRoundingMode
0733 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0734 [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
0735 llvm_anyint_ty, LLVMMatchType<3>],
0736 [ImmArg<ArgIndex<3>>, IntrNoMem]>,
0737 RISCVVIntrinsic {
0738 let VLOperand = 4;
0739 }
0740 // For Saturating binary operations with mask.
0741 // The destination vector type is NOT the same as first source vector (with mask).
0742 // The second source operand matches the destination type or is an XLen scalar.
0743 // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
0744 class RISCVSaturatingBinaryABShiftMaskedRoundingMode
0745 : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
0746 [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
0747 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
0748 LLVMMatchType<3>, LLVMMatchType<3>],
0749 [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, RISCVVIntrinsic {
0750 let VLOperand = 5;
0751 }
// For slide operations without mask (see "defm vslideup/vslidedown" below).
// The slide amount (scalar_in) is an XLen scalar; policy must be an immediate.
// Input: (vector_in, vector_in, scalar_in, vl, policy)
class RVVSlideUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                             LLVMMatchType<1>, LLVMMatchType<1>],
                            [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
  let VLOperand = 3;
}
// For slide operations with mask.
// The slide amount is an XLen scalar (not a vector); policy must be an
// immediate.
// Input: (vector_in, vector_in, scalar_in, mask, vl, policy)
class RVVSlideMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             LLVMMatchType<1>, LLVMMatchType<1>],
                            [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
  let VLOperand = 4;
}
// UnMasked Vector Multiply-Add operations, its first operand can not be undef.
// Input: (vector_in, vector_in/scalar, vector_in, vl, policy)
class RISCVTernaryAAXAUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                             llvm_anyint_ty, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
  let ScalarOperand = 1;
  let VLOperand = 3;
}
// Masked Vector Multiply-Add operations, its first operand can not be undef.
// Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy)
class RISCVTernaryAAXAMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
  let ScalarOperand = 1;
  let VLOperand = 4;
}
// UnMasked Vector Multiply-Add operations, its first operand can not be undef.
// The frm (floating-point rounding mode) and policy operands are immediates.
// Input: (vector_in, vector_in/scalar, vector_in, frm, vl, policy)
class RISCVTernaryAAXAUnMaskedRoundingMode
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                             llvm_anyint_ty, LLVMMatchType<2>, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>, IntrNoMem]>,
                            RISCVVIntrinsic {
  let ScalarOperand = 1;
  let VLOperand = 4;
}
// Masked Vector Multiply-Add operations, its first operand can not be undef.
// The frm (floating-point rounding mode) and policy operands are immediates.
// Input: (vector_in, vector_in/scalar, vector_in, mask, frm, vl, policy)
class RISCVTernaryAAXAMaskedRoundingMode
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<2>, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>,
                            RISCVVIntrinsic {
  let ScalarOperand = 1;
  let VLOperand = 5;
}
// UnMasked Widening Vector Multiply-Add operations, its first operand can not be undef.
// Input: (vector_in, vector_in/scalar, vector_in, vl, policy)
class RISCVTernaryWideUnMasked
    : DefaultAttrsIntrinsic< [llvm_anyvector_ty],
                             [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                              llvm_anyint_ty, LLVMMatchType<3>],
                             [ImmArg<ArgIndex<4>>, IntrNoMem] >, RISCVVIntrinsic {
  let ScalarOperand = 1;
  let VLOperand = 3;
}
// Masked Widening Vector Multiply-Add operations, its first operand can not be undef.
// Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy)
class RISCVTernaryWideMasked
    : DefaultAttrsIntrinsic< [llvm_anyvector_ty],
                             [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                              LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                              llvm_anyint_ty, LLVMMatchType<3>],
                             [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
  let ScalarOperand = 1;
  let VLOperand = 4;
}
// UnMasked Widening Vector Multiply-Add operations, its first operand can not be undef.
// The frm (floating-point rounding mode) and policy operands are immediates.
// Input: (vector_in, vector_in/scalar, vector_in, frm, vl, policy)
class RISCVTernaryWideUnMaskedRoundingMode
    : DefaultAttrsIntrinsic< [llvm_anyvector_ty],
                             [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                              llvm_anyint_ty, LLVMMatchType<3>, LLVMMatchType<3>],
                             [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>, IntrNoMem] >,
                             RISCVVIntrinsic {
  let ScalarOperand = 1;
  let VLOperand = 4;
}
// Masked Widening Vector Multiply-Add operations, its first operand can not be undef.
// The frm (floating-point rounding mode) and policy operands are immediates.
// Input: (vector_in, vector_in/scalar, vector_in, mask, frm, vl, policy)
class RISCVTernaryWideMaskedRoundingMode
    : DefaultAttrsIntrinsic< [llvm_anyvector_ty],
                             [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                              LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                              llvm_anyint_ty, LLVMMatchType<3>, LLVMMatchType<3>],
                             [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>,
                             RISCVVIntrinsic {
  let ScalarOperand = 1;
  let VLOperand = 5;
}
// For Reduction ternary operations.
// The destination vector type is the same as the first and third source vector.
// Input: (vector_in, vector_in, vector_in, vl)
class RISCVReductionUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                             llvm_anyint_ty],
                            [IntrNoMem]>, RISCVVIntrinsic {
  let VLOperand = 3;
}
// For Reduction ternary operations with mask.
// The destination vector type is the same as the first and third source vector.
// The mask type comes from the second source vector.
// Input: (maskedoff, vector_in, vector_in, vector_in, mask, vl)
class RISCVReductionMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                             LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, llvm_anyint_ty],
                            [IntrNoMem]>, RISCVVIntrinsic {
  let VLOperand = 4;
}
// For Reduction ternary operations.
// The destination vector type is the same as the first and third source vector.
// The frm (floating-point rounding mode) operand must be an immediate.
// Input: (vector_in, vector_in, vector_in, frm, vl)
class RISCVReductionUnMaskedRoundingMode
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                             llvm_anyint_ty, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
  let VLOperand = 4;
}
// For Reduction ternary operations with mask.
// The destination vector type is the same as the first and third source vector.
// The mask type comes from the second source vector.
// The frm (floating-point rounding mode) operand must be an immediate.
// Input: (vector_in, vector_in, vector_in, mask, frm, vl)
class RISCVReductionMaskedRoundingMode
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                             LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, llvm_anyint_ty,
                             LLVMMatchType<2>],
                            [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
  let VLOperand = 5;
}
// For unary operations with scalar type output without mask.
// The result type matches the vl operand type (an XLen scalar).
// Output: (scalar type)
// Input: (vector_in, vl)
class RISCVMaskedUnarySOutUnMasked
    : DefaultAttrsIntrinsic<[LLVMMatchType<1>],
                            [llvm_anyvector_ty, llvm_anyint_ty],
                            [IntrNoMem]>, RISCVVIntrinsic {
  let VLOperand = 1;
}
// For unary operations with scalar type output with mask.
// The mask operand has the same (mask vector) type as the source vector.
// Output: (scalar type)
// Input: (vector_in, mask, vl)
class RISCVMaskedUnarySOutMasked
    : DefaultAttrsIntrinsic<[LLVMMatchType<1>],
                            [llvm_anyvector_ty, LLVMMatchType<0>, llvm_anyint_ty],
                            [IntrNoMem]>, RISCVVIntrinsic {
  let VLOperand = 2;
}
// For destination vector type is NOT the same as source vector.
// Input: (passthru, vector_in, vl)
class RISCVUnaryABUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty],
                            [IntrNoMem]>, RISCVVIntrinsic {
  let VLOperand = 2;
}
// For destination vector type is NOT the same as source vector (with mask).
// The mask length matches the source vector; policy must be an immediate.
// Input: (maskedoff, vector_in, mask, vl, policy)
class RISCVUnaryABMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty,
                             LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
  let VLOperand = 3;
}
// For unary operations with the same vector type in/out without mask.
// Output: (vector)
// Input: (vector_in, vl)
class RISCVUnaryUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyint_ty],
                            [IntrNoMem]>, RISCVVIntrinsic {
  let VLOperand = 1;
}
// For mask unary operations with mask type in/out with mask.
// All of maskedoff, vector_in and mask share the result type.
// Output: (mask type output)
// Input: (mask type maskedoff, mask type vector_in, mask, vl)
class RISCVMaskedUnaryMOutMasked
    : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>,
                             LLVMMatchType<0>, llvm_anyint_ty],
                            [IntrNoMem]>, RISCVVIntrinsic {
  let VLOperand = 3;
}
// For operations with no data inputs (e.g. vmclr/vmset below).
// Output: (vector)
// Input: (vl)
class RISCVNullaryIntrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic {
  let VLOperand = 1;
}
// Like RISCVNullaryIntrinsic but with an explicit passthru operand.
// Output: (vector)
// Input: (passthru, vl)
class RISCVID
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyint_ty],
                            [IntrNoMem]>, RISCVVIntrinsic {
  let VLOperand = 1;
}
// For Conversion unary operations.
// Input: (passthru, vector_in, vl)
class RISCVConversionUnMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty],
                            [IntrNoMem]>, RISCVVIntrinsic {
  let VLOperand = 2;
}
// For Conversion unary operations with mask.
// The policy operand must be an immediate.
// Input: (maskedoff, vector_in, mask, vl, policy)
class RISCVConversionMasked
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                             LLVMMatchType<2>],
                            [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
  let VLOperand = 3;
}
// For Conversion unary operations.
// The frm (floating-point rounding mode) operand must be an immediate.
// Input: (passthru, vector_in, frm, vl)
class RISCVConversionUnMaskedRoundingMode
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty,
                             LLVMMatchType<2>],
                            [ImmArg<ArgIndex<2>>, IntrNoMem]>, RISCVVIntrinsic {
  let VLOperand = 3;
}
// For Conversion unary operations with mask.
// The frm (floating-point rounding mode) and policy operands are immediates.
// Input: (maskedoff, vector_in, mask, frm, vl, policy)
class RISCVConversionMaskedRoundingMode
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                             LLVMMatchType<2>, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
  let VLOperand = 4;
}
1007
// For unit stride segment load.
// The sew (element width) operand must be an immediate.
// Input: (passthru, pointer, vl, sew)
class RISCVUSSegLoad
    : DefaultAttrsIntrinsic<[llvm_any_ty],
                            [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyint_ty,
                             LLVMMatchType<1>],
                            [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<3>>, IntrReadMem,
                             IntrArgMemOnly]>,
                            RISCVVIntrinsic {
  let VLOperand = 2;
}
// For unit stride segment load with mask.
// The policy and sew operands must be immediates.
// Input: (maskedoff, pointer, mask, vl, policy, sew)
class RISCVUSSegLoadMasked
    : DefaultAttrsIntrinsic<[llvm_any_ty],
                            [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyvector_ty,
                             llvm_anyint_ty, LLVMMatchType<2>, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>,
                             NoCapture<ArgIndex<1>>, IntrReadMem, IntrArgMemOnly]>,
                            RISCVVIntrinsic {
  let VLOperand = 3;
}
1030
// For unit stride fault-only-first segment load.
// Input: (passthru, pointer, vl, sew)
// Output: (data, vl)
// NOTE: We model this with default memory properties since we model writing
// VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
class RISCVUSSegLoadFF
    : DefaultAttrsIntrinsic<[llvm_any_ty, llvm_anyint_ty],
            [LLVMMatchType<0>, llvm_ptr_ty, LLVMMatchType<1>, LLVMMatchType<1>],
            [ImmArg<ArgIndex<3>>, NoCapture<ArgIndex<1>>]>, RISCVVIntrinsic {
  let VLOperand = 2;
}
// For unit stride fault-only-first segment load with mask.
// Input: (maskedoff, pointer, mask, vl, policy, sew)
// Output: (data, vl)
// NOTE: We model this with default memory properties since we model writing
// VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
class RISCVUSSegLoadFFMasked
    : DefaultAttrsIntrinsic<[llvm_any_ty, llvm_anyint_ty],
                     [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyvector_ty,
                      LLVMMatchType<1>, LLVMMatchType<1>, LLVMMatchType<1>],
                     [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>, NoCapture<ArgIndex<1>>]>,
                     RISCVVIntrinsic {
  let VLOperand = 3;
}
1055
// For stride segment load.
// The offset is the byte stride between segments; sew must be an immediate.
// Input: (passthru, pointer, offset, vl, sew)
class RISCVSSegLoad
    : DefaultAttrsIntrinsic<[llvm_any_ty],
                            [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyint_ty,
                             LLVMMatchType<1>, LLVMMatchType<1>],
                            [ImmArg<ArgIndex<4>>, NoCapture<ArgIndex<1>>, IntrReadMem]>,
                            RISCVVIntrinsic {
  let VLOperand = 3;
}
// For stride segment load with mask.
// The policy and sew operands must be immediates.
// Input: (maskedoff, pointer, offset, mask, vl, policy, sew)
class RISCVSSegLoadMasked
    : DefaultAttrsIntrinsic<[llvm_any_ty],
                            [LLVMMatchType<0>, llvm_ptr_ty,
                             llvm_anyint_ty, llvm_anyvector_ty,
                             LLVMMatchType<1>, LLVMMatchType<1>,
                             LLVMMatchType<1>],
                            [ImmArg<ArgIndex<5>>, ImmArg<ArgIndex<6>>,
                             NoCapture<ArgIndex<1>>, IntrReadMem]>,
                            RISCVVIntrinsic {
  let VLOperand = 4;
}
1079
// For indexed segment load.
// The index operand is a vector of offsets; sew must be an immediate.
// Input: (passthru, pointer, index, vl, sew)
class RISCVISegLoad
    : DefaultAttrsIntrinsic<[llvm_any_ty],
                            [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyvector_ty,
                             llvm_anyint_ty, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<4>>, NoCapture<ArgIndex<1>>, IntrReadMem]>,
                            RISCVVIntrinsic {
  let VLOperand = 3;
}
// For indexed segment load with mask.
// The policy and sew operands must be immediates.
// Input: (maskedoff, pointer, index, mask, vl, policy, sew)
class RISCVISegLoadMasked
    : DefaultAttrsIntrinsic<[llvm_any_ty],
                            [LLVMMatchType<0>, llvm_ptr_ty,
                             llvm_anyvector_ty, llvm_anyvector_ty,
                             llvm_anyint_ty, LLVMMatchType<3>, LLVMMatchType<3>],
                            [ImmArg<ArgIndex<5>>, ImmArg<ArgIndex<6>>,
                             NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
  let VLOperand = 4;
}
1101
// For unit stride segment store.
// The sew operand must be an immediate.
// Input: (value, pointer, vl, sew)
class RISCVUSSegStore
    : DefaultAttrsIntrinsic<[],
                            [llvm_any_ty, llvm_ptr_ty, llvm_anyint_ty,
                             LLVMMatchType<1>],
                            [ImmArg<ArgIndex<3>>, NoCapture<ArgIndex<1>>, IntrWriteMem,
                             IntrArgMemOnly]>,
                            RISCVVIntrinsic {
  let VLOperand = 2;
}
// For unit stride segment store with mask.
// The sew operand must be an immediate.
// Input: (value, pointer, mask, vl, sew)
class RISCVUSSegStoreMasked
    : DefaultAttrsIntrinsic<[],
                            [llvm_any_ty, llvm_ptr_ty,
                             llvm_anyvector_ty, llvm_anyint_ty, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<4>>, NoCapture<ArgIndex<1>>, IntrWriteMem,
                             IntrArgMemOnly]>,
                            RISCVVIntrinsic {
  let VLOperand = 3;
}
1124
// For stride segment store.
// The offset is the byte stride between segments; sew must be an immediate.
// Input: (value, pointer, offset, vl, sew)
class RISCVSSegStore
    : DefaultAttrsIntrinsic<[],
                            [llvm_any_ty, llvm_ptr_ty, llvm_anyint_ty,
                             LLVMMatchType<1>, LLVMMatchType<1>],
                            [ImmArg<ArgIndex<4>>, NoCapture<ArgIndex<1>>, IntrWriteMem]>,
                            RISCVVIntrinsic {
  let VLOperand = 3;
}
// For stride segment store with mask.
// The sew operand must be an immediate.
// Input: (value, pointer, offset, mask, vl, sew)
class RISCVSSegStoreMasked
    : DefaultAttrsIntrinsic<[],
                            [llvm_any_ty, llvm_ptr_ty, llvm_anyint_ty,
                             llvm_anyvector_ty, LLVMMatchType<1>,
                             LLVMMatchType<1>],
                            [ImmArg<ArgIndex<5>>, NoCapture<ArgIndex<1>>, IntrWriteMem]>,
                            RISCVVIntrinsic {
  let VLOperand = 4;
}
1146
// For indexed segment store.
// The index operand is a vector of offsets (cf. RISCVISegLoad); sew must be
// an immediate.
// Input: (value, pointer, index, vl, sew)
class RISCVISegStore
    : DefaultAttrsIntrinsic<[],
                            [llvm_any_ty, llvm_ptr_ty, llvm_anyvector_ty,
                             llvm_anyint_ty, LLVMMatchType<2>],
                            [ImmArg<ArgIndex<4>>, NoCapture<ArgIndex<1>>, IntrWriteMem]>,
                            RISCVVIntrinsic {
  let VLOperand = 3;
}
// For indexed segment store with mask.
// The sew operand must be an immediate.
// Input: (value, pointer, index, mask, vl, sew)
class RISCVISegStoreMasked
    : DefaultAttrsIntrinsic<[],
                            [llvm_any_ty, llvm_ptr_ty, llvm_anyvector_ty,
                             llvm_anyvector_ty, llvm_anyint_ty,
                             LLVMMatchType<3>],
                            [ImmArg<ArgIndex<5>>, NoCapture<ArgIndex<1>>, IntrWriteMem]>,
                            RISCVVIntrinsic {
  let VLOperand = 4;
}
1168
// Each multiclass below instantiates a pair of intrinsics from the classes
// above: the unmasked variant under the canonical name "int_riscv_<NAME>" and
// the masked variant under "int_riscv_<NAME>_mask" (except the WithV0 /
// MaskOut multiclasses, which only have an unmasked form).
multiclass RISCVUSLoad {
  def "int_riscv_" # NAME : RISCVUSLoad;
  def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMasked;
}
multiclass RISCVUSLoadFF {
  def "int_riscv_" # NAME : RISCVUSLoadFF;
  def "int_riscv_" # NAME # "_mask" : RISCVUSLoadFFMasked;
}
multiclass RISCVSLoad {
  def "int_riscv_" # NAME : RISCVSLoad;
  def "int_riscv_" # NAME # "_mask" : RISCVSLoadMasked;
}
multiclass RISCVILoad {
  def "int_riscv_" # NAME : RISCVILoad;
  def "int_riscv_" # NAME # "_mask" : RISCVILoadMasked;
}
multiclass RISCVUSStore {
  def "int_riscv_" # NAME : RISCVUSStore;
  def "int_riscv_" # NAME # "_mask" : RISCVUSStoreMasked;
}
multiclass RISCVSStore {
  def "int_riscv_" # NAME : RISCVSStore;
  def "int_riscv_" # NAME # "_mask" : RISCVSStoreMasked;
}

multiclass RISCVIStore {
  def "int_riscv_" # NAME : RISCVIStore;
  def "int_riscv_" # NAME # "_mask" : RISCVIStoreMasked;
}
multiclass RISCVUnaryAA {
  def "int_riscv_" # NAME : RISCVUnaryAAUnMasked;
  def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMasked;
}
multiclass RISCVUnaryAARoundingMode {
  def "int_riscv_" # NAME : RISCVUnaryAAUnMaskedRoundingMode;
  def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMaskedRoundingMode;
}
multiclass RISCVUnaryAB {
  def "int_riscv_" # NAME : RISCVUnaryABUnMasked;
  def "int_riscv_" # NAME # "_mask" : RISCVUnaryABMasked;
}
// AAX means the destination type(A) is the same as the first source
// type(A). X means any type for the second source operand.
multiclass RISCVBinaryAAX {
  def "int_riscv_" # NAME : RISCVBinaryAAXUnMasked;
  def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMasked;
}
multiclass RISCVBinaryAAXRoundingMode {
  def "int_riscv_" # NAME : RISCVBinaryAAXUnMaskedRoundingMode;
  def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMaskedRoundingMode;
}
// Like RISCVBinaryAAX, but the second operand is used as a shift amount so it
// must be a vector or an XLen scalar.
multiclass RISCVBinaryAAShift {
  def "int_riscv_" # NAME : RISCVBinaryAAShiftUnMasked;
  def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAShiftMasked;
}
multiclass RISCVRGatherVV {
  def "int_riscv_" # NAME : RISCVRGatherVVUnMasked;
  def "int_riscv_" # NAME # "_mask" : RISCVRGatherVVMasked;
}
multiclass RISCVRGatherVX {
  def "int_riscv_" # NAME : RISCVGatherVXUnMasked;
  def "int_riscv_" # NAME # "_mask" : RISCVGatherVXMasked;
}
multiclass RISCVRGatherEI16VV {
  def "int_riscv_" # NAME : RISCVRGatherEI16VVUnMasked;
  def "int_riscv_" # NAME # "_mask" : RISCVRGatherEI16VVMasked;
}
// ABX means the destination type(A) is different from the first source
// type(B). X means any type for the second source operand.
multiclass RISCVBinaryABX {
  def "int_riscv_" # NAME : RISCVBinaryABXUnMasked;
  def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMasked;
}
multiclass RISCVBinaryABXRoundingMode {
  def "int_riscv_" # NAME : RISCVBinaryABXUnMaskedRoundingMode;
  def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMaskedRoundingMode;
}
// Like RISCVBinaryABX, but the second operand is used as a shift amount so it
// must be a vector or an XLen scalar.
multiclass RISCVBinaryABShift {
  def "int_riscv_" # NAME : RISCVBinaryABShiftUnMasked;
  def "int_riscv_" # NAME # "_mask" : RISCVBinaryABShiftMasked;
}
multiclass RISCVBinaryWithV0 {
  def "int_riscv_" # NAME : RISCVBinaryWithV0;
}
multiclass RISCVBinaryMaskOutWithV0 {
  def "int_riscv_" # NAME : RISCVBinaryMOutWithV0;
}
multiclass RISCVBinaryMaskOut {
  def "int_riscv_" # NAME : RISCVBinaryMOut;
}
multiclass RISCVSaturatingBinaryAAX {
  def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXUnMasked;
  def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMasked;
}
multiclass RISCVSaturatingBinaryAAXRoundingMode {
  def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXUnMaskedRoundingMode;
  def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMaskedRoundingMode;
}
multiclass RISCVSaturatingBinaryAAShiftRoundingMode {
  def "int_riscv_" # NAME : RISCVSaturatingBinaryAAShiftUnMaskedRoundingMode;
  def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAShiftMaskedRoundingMode;
}
multiclass RISCVSaturatingBinaryABShiftRoundingMode {
  def "int_riscv_" # NAME : RISCVSaturatingBinaryABShiftUnMaskedRoundingMode;
  def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryABShiftMaskedRoundingMode;
}
multiclass RVVSlide {
  def "int_riscv_" # NAME : RVVSlideUnMasked;
  def "int_riscv_" # NAME # "_mask" : RVVSlideMasked;
}
multiclass RISCVTernaryAAXA {
  def "int_riscv_" # NAME : RISCVTernaryAAXAUnMasked;
  def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAXAMasked;
}
multiclass RISCVTernaryAAXARoundingMode {
  def "int_riscv_" # NAME : RISCVTernaryAAXAUnMaskedRoundingMode;
  def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAXAMaskedRoundingMode;
}
multiclass RISCVCompare {
  def "int_riscv_" # NAME : RISCVCompareUnMasked;
  def "int_riscv_" # NAME # "_mask" : RISCVCompareMasked;
}
multiclass RISCVClassify {
  def "int_riscv_" # NAME : RISCVClassifyUnMasked;
  def "int_riscv_" # NAME # "_mask" : RISCVClassifyMasked;
}
multiclass RISCVTernaryWide {
  def "int_riscv_" # NAME : RISCVTernaryWideUnMasked;
  def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMasked;
}
multiclass RISCVTernaryWideRoundingMode {
  def "int_riscv_" # NAME : RISCVTernaryWideUnMaskedRoundingMode;
  def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMaskedRoundingMode;
}
multiclass RISCVReduction {
  def "int_riscv_" # NAME : RISCVReductionUnMasked;
  def "int_riscv_" # NAME # "_mask" : RISCVReductionMasked;
}
multiclass RISCVReductionRoundingMode {
  def "int_riscv_" # NAME : RISCVReductionUnMaskedRoundingMode;
  def "int_riscv_" # NAME # "_mask" : RISCVReductionMaskedRoundingMode;
}
multiclass RISCVMaskedUnarySOut {
  def "int_riscv_" # NAME : RISCVMaskedUnarySOutUnMasked;
  def "int_riscv_" # NAME # "_mask" : RISCVMaskedUnarySOutMasked;
}
multiclass RISCVMaskedUnaryMOut {
  def "int_riscv_" # NAME : RISCVUnaryUnMasked;
  def "int_riscv_" # NAME # "_mask" : RISCVMaskedUnaryMOutMasked;
}
multiclass RISCVConversion {
  def "int_riscv_" #NAME :RISCVConversionUnMasked;
  def "int_riscv_" # NAME # "_mask" : RISCVConversionMasked;
}
multiclass RISCVConversionRoundingMode {
  def "int_riscv_" #NAME :RISCVConversionUnMaskedRoundingMode;
  def "int_riscv_" # NAME # "_mask" : RISCVConversionMaskedRoundingMode;
}
multiclass RISCVUSSegLoad {
  def "int_riscv_" # NAME : RISCVUSSegLoad;
  def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadMasked;
}
multiclass RISCVUSSegLoadFF {
  def "int_riscv_" # NAME : RISCVUSSegLoadFF;
  def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadFFMasked;
}
multiclass RISCVSSegLoad {
  def "int_riscv_" # NAME : RISCVSSegLoad;
  def "int_riscv_" # NAME # "_mask" : RISCVSSegLoadMasked;
}
multiclass RISCVISegLoad {
  def "int_riscv_" # NAME : RISCVISegLoad;
  def "int_riscv_" # NAME # "_mask" : RISCVISegLoadMasked;
}
multiclass RISCVUSSegStore {
  def "int_riscv_" # NAME : RISCVUSSegStore;
  def "int_riscv_" # NAME # "_mask" : RISCVUSSegStoreMasked;
}
multiclass RISCVSSegStore {
  def "int_riscv_" # NAME : RISCVSSegStore;
  def "int_riscv_" # NAME # "_mask" : RISCVSSegStoreMasked;
}
multiclass RISCVISegStore {
  def "int_riscv_" # NAME : RISCVISegStore;
  def "int_riscv_" # NAME # "_mask" : RISCVISegStoreMasked;
}
1359
//==-- Intrinsics to perform vector tuple subvector insertion/extraction --=//
// The tuple index operand must be an immediate in both intrinsics.
def int_riscv_tuple_insert
    : DefaultAttrsIntrinsic<[llvm_any_ty],
                            [LLVMMatchType<0>, llvm_anyvector_ty, llvm_i32_ty],
                            [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<2>>]>;

def int_riscv_tuple_extract
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [llvm_any_ty, llvm_i32_ty],
                            [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<1>>]>;
1370
// Unit-stride, strided (vlse/vsse) and indexed (vluxei/vloxei/vsoxei/vsuxei)
// load/store intrinsics.
defm vle : RISCVUSLoad;
defm vleff : RISCVUSLoadFF;
defm vse : RISCVUSStore;
defm vlse: RISCVSLoad;
defm vsse: RISCVSStore;
defm vluxei : RISCVILoad;
defm vloxei : RISCVILoad;
defm vsoxei : RISCVIStore;
defm vsuxei : RISCVIStore;

// Mask load/store (unmasked only).
def int_riscv_vlm : RISCVUSMLoad;
def int_riscv_vsm : RISCVUSStore;
1383
// Single-width integer add/subtract.
defm vadd : RISCVBinaryAAX;
defm vsub : RISCVBinaryAAX;
defm vrsub : RISCVBinaryAAX;

// Widening integer add/subtract ("_w" forms take an already-widened first
// operand, hence the AAX shape).
defm vwaddu : RISCVBinaryABX;
defm vwadd : RISCVBinaryABX;
defm vwaddu_w : RISCVBinaryAAX;
defm vwadd_w : RISCVBinaryAAX;
defm vwsubu : RISCVBinaryABX;
defm vwsub : RISCVBinaryABX;
defm vwsubu_w : RISCVBinaryAAX;
defm vwsub_w : RISCVBinaryAAX;

// Integer extension.
defm vzext : RISCVUnaryAB;
defm vsext : RISCVUnaryAB;

// Add/subtract-with-carry, carry/borrow-out.
defm vadc : RISCVBinaryWithV0;
defm vmadc_carry_in : RISCVBinaryMaskOutWithV0;
defm vmadc : RISCVBinaryMaskOut;

defm vsbc : RISCVBinaryWithV0;
defm vmsbc_borrow_in : RISCVBinaryMaskOutWithV0;
defm vmsbc : RISCVBinaryMaskOut;

// Bitwise logical operations.
defm vand : RISCVBinaryAAX;
defm vor : RISCVBinaryAAX;
defm vxor : RISCVBinaryAAX;

// Single-width and narrowing shifts.
defm vsll : RISCVBinaryAAShift;
defm vsrl : RISCVBinaryAAShift;
defm vsra : RISCVBinaryAAShift;

defm vnsrl : RISCVBinaryABShift;
defm vnsra : RISCVBinaryABShift;

// Integer comparisons (mask result).
defm vmseq : RISCVCompare;
defm vmsne : RISCVCompare;
defm vmsltu : RISCVCompare;
defm vmslt : RISCVCompare;
defm vmsleu : RISCVCompare;
defm vmsle : RISCVCompare;
defm vmsgtu : RISCVCompare;
defm vmsgt : RISCVCompare;
defm vmsgeu : RISCVCompare;
defm vmsge : RISCVCompare;

// Integer min/max.
defm vminu : RISCVBinaryAAX;
defm vmin : RISCVBinaryAAX;
defm vmaxu : RISCVBinaryAAX;
defm vmax : RISCVBinaryAAX;

// Integer multiply, divide and remainder.
defm vmul : RISCVBinaryAAX;
defm vmulh : RISCVBinaryAAX;
defm vmulhu : RISCVBinaryAAX;
defm vmulhsu : RISCVBinaryAAX;

defm vdivu : RISCVBinaryAAX;
defm vdiv : RISCVBinaryAAX;
defm vremu : RISCVBinaryAAX;
defm vrem : RISCVBinaryAAX;

// Widening multiply and (widening) multiply-add.
defm vwmul : RISCVBinaryABX;
defm vwmulu : RISCVBinaryABX;
defm vwmulsu : RISCVBinaryABX;

defm vmacc : RISCVTernaryAAXA;
defm vnmsac : RISCVTernaryAAXA;
defm vmadd : RISCVTernaryAAXA;
defm vnmsub : RISCVTernaryAAXA;

defm vwmaccu : RISCVTernaryWide;
defm vwmacc : RISCVTernaryWide;
defm vwmaccus : RISCVTernaryWide;
defm vwmaccsu : RISCVTernaryWide;

// Floating-point add/subtract (take an frm operand).
defm vfadd : RISCVBinaryAAXRoundingMode;
defm vfsub : RISCVBinaryAAXRoundingMode;
defm vfrsub : RISCVBinaryAAXRoundingMode;

defm vfwadd : RISCVBinaryABXRoundingMode;
defm vfwsub : RISCVBinaryABXRoundingMode;
defm vfwadd_w : RISCVBinaryAAXRoundingMode;
defm vfwsub_w : RISCVBinaryAAXRoundingMode;

// Saturating integer add/subtract.
defm vsaddu : RISCVSaturatingBinaryAAX;
defm vsadd : RISCVSaturatingBinaryAAX;
defm vssubu : RISCVSaturatingBinaryAAX;
defm vssub : RISCVSaturatingBinaryAAX;

defm vmerge : RISCVBinaryWithV0;
1474
// Output: (vector)
// Input: (passthru, vector_in, vl)
def int_riscv_vmv_v_v : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                              [LLVMMatchType<0>,
                                               LLVMMatchType<0>,
                                               llvm_anyint_ty],
                                              [IntrNoMem]>, RISCVVIntrinsic {
  let VLOperand = 2;
}
// Output: (vector)
// Input: (passthru, scalar, vl)
def int_riscv_vmv_v_x : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                                              [LLVMMatchType<0>,
                                               LLVMVectorElementType<0>,
                                               llvm_anyint_ty],
                                              [IntrNoMem]>, RISCVVIntrinsic {
  let VLOperand = 2;
}
// Output: (vector)
// Input: (passthru, scalar, vl)
def int_riscv_vfmv_v_f : DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
                                               [LLVMMatchType<0>,
                                                LLVMVectorElementType<0>,
                                                llvm_anyint_ty],
                                               [IntrNoMem]>, RISCVVIntrinsic {
  let VLOperand = 2;
}

// Output: (element-type scalar)
// Input: (vector_in)
def int_riscv_vmv_x_s : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                                              [llvm_anyint_ty],
                                              [IntrNoMem]>, RISCVVIntrinsic;
// Output: (vector)
// Input: (passthru, scalar, vl)
def int_riscv_vmv_s_x : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                                              [LLVMMatchType<0>,
                                               LLVMVectorElementType<0>,
                                               llvm_anyint_ty],
                                              [IntrNoMem]>, RISCVVIntrinsic {
  let VLOperand = 2;
}

// Output: (element-type scalar)
// Input: (vector_in)
def int_riscv_vfmv_f_s : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                                               [llvm_anyfloat_ty],
                                               [IntrNoMem]>, RISCVVIntrinsic;
// Output: (vector)
// Input: (passthru, scalar, vl)
def int_riscv_vfmv_s_f : DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
                                               [LLVMMatchType<0>,
                                                LLVMVectorElementType<0>,
                                                llvm_anyint_ty],
                                               [IntrNoMem]>, RISCVVIntrinsic {
  let VLOperand = 2;
}
1524
1525 defm vfmul : RISCVBinaryAAXRoundingMode;
1526 defm vfdiv : RISCVBinaryAAXRoundingMode;
1527 defm vfrdiv : RISCVBinaryAAXRoundingMode;
1528
1529 defm vfwmul : RISCVBinaryABXRoundingMode;
1530
1531 defm vfmacc : RISCVTernaryAAXARoundingMode;
1532 defm vfnmacc : RISCVTernaryAAXARoundingMode;
1533 defm vfmsac : RISCVTernaryAAXARoundingMode;
1534 defm vfnmsac : RISCVTernaryAAXARoundingMode;
1535 defm vfmadd : RISCVTernaryAAXARoundingMode;
1536 defm vfnmadd : RISCVTernaryAAXARoundingMode;
1537 defm vfmsub : RISCVTernaryAAXARoundingMode;
1538 defm vfnmsub : RISCVTernaryAAXARoundingMode;
1539
1540 defm vfwmacc : RISCVTernaryWideRoundingMode;
1541 defm vfwmaccbf16 : RISCVTernaryWideRoundingMode;
1542 defm vfwnmacc : RISCVTernaryWideRoundingMode;
1543 defm vfwmsac : RISCVTernaryWideRoundingMode;
1544 defm vfwnmsac : RISCVTernaryWideRoundingMode;
1545
1546 defm vfsqrt : RISCVUnaryAARoundingMode;
1547 defm vfrsqrt7 : RISCVUnaryAA;
1548 defm vfrec7 : RISCVUnaryAARoundingMode;
1549
1550 defm vfmin : RISCVBinaryAAX;
1551 defm vfmax : RISCVBinaryAAX;
1552
1553 defm vfsgnj : RISCVBinaryAAX;
1554 defm vfsgnjn : RISCVBinaryAAX;
1555 defm vfsgnjx : RISCVBinaryAAX;
1556
// vfclass.v: classify each floating-point element into a bit mask.
defm vfclass : RISCVClassify;

// vfmerge: element-wise select between vector and scalar under the v0 mask.
defm vfmerge : RISCVBinaryWithV0;

// Slide the whole vector up/down by a variable or immediate offset.
defm vslideup : RVVSlide;
defm vslidedown : RVVSlide;

// Slide by exactly one element, inserting a scalar (integer or FP) into the
// vacated position.
defm vslide1up : RISCVBinaryAAX;
defm vslide1down : RISCVBinaryAAX;
defm vfslide1up : RISCVBinaryAAX;
defm vfslide1down : RISCVBinaryAAX;

// Register gather: index elements by a vector (vv), a scalar (vx), or a
// 16-bit index vector (ei16, allows indices wider/narrower than SEW).
defm vrgather_vv : RISCVRGatherVV;
defm vrgather_vx : RISCVRGatherVX;
defm vrgatherei16_vv : RISCVRGatherEI16VV;

// vcompress: pack the mask-active elements contiguously to the start of vd.
def "int_riscv_vcompress" : RISCVCompress;
1574
// Fixed-point averaging add/subtract (unsigned/signed); the RoundingMode
// classes add an explicit rounding-mode operand.
defm vaaddu : RISCVSaturatingBinaryAAXRoundingMode;
defm vaadd : RISCVSaturatingBinaryAAXRoundingMode;
defm vasubu : RISCVSaturatingBinaryAAXRoundingMode;
defm vasub : RISCVSaturatingBinaryAAXRoundingMode;

// Fractional saturating multiply with rounding.
defm vsmul : RISCVSaturatingBinaryAAXRoundingMode;

// Scaling shift right (logical/arithmetic) with rounding.
defm vssrl : RISCVSaturatingBinaryAAShiftRoundingMode;
defm vssra : RISCVSaturatingBinaryAAShiftRoundingMode;

// Narrowing fixed-point clip (unsigned/signed saturation to the narrower
// result type).
defm vnclipu : RISCVSaturatingBinaryABShiftRoundingMode;
defm vnclip : RISCVSaturatingBinaryABShiftRoundingMode;
1587
// Floating-point compares; each produces a mask vector of i1 elements.
defm vmfeq : RISCVCompare;
defm vmfne : RISCVCompare;
defm vmflt : RISCVCompare;
defm vmfle : RISCVCompare;
defm vmfgt : RISCVCompare;
defm vmfge : RISCVCompare;
1594
// Single-width integer reductions (reduce a vector into element 0 of the
// scalar-holding result vector).
defm vredsum : RISCVReduction;
defm vredand : RISCVReduction;
defm vredor : RISCVReduction;
defm vredxor : RISCVReduction;
defm vredminu : RISCVReduction;
defm vredmin : RISCVReduction;
defm vredmaxu : RISCVReduction;
defm vredmax : RISCVReduction;

// Widening integer sum reductions (unsigned/signed).
defm vwredsumu : RISCVReduction;
defm vwredsum : RISCVReduction;

// Floating-point reductions; the ordered (osum) and unordered (usum) sums
// carry a rounding-mode operand, min/max do not.
defm vfredosum : RISCVReductionRoundingMode;
defm vfredusum : RISCVReductionRoundingMode;
defm vfredmin : RISCVReduction;
defm vfredmax : RISCVReduction;

// Widening floating-point sum reductions.
defm vfwredusum : RISCVReductionRoundingMode;
defm vfwredosum : RISCVReductionRoundingMode;
1614
// Mask-register logical operations (mask op mask -> mask); these have no
// masked variants.
def int_riscv_vmand: RISCVBinaryAAAUnMasked;
def int_riscv_vmnand: RISCVBinaryAAAUnMasked;
def int_riscv_vmandn: RISCVBinaryAAAUnMasked;
def int_riscv_vmxor: RISCVBinaryAAAUnMasked;
def int_riscv_vmor: RISCVBinaryAAAUnMasked;
def int_riscv_vmnor: RISCVBinaryAAAUnMasked;
def int_riscv_vmorn: RISCVBinaryAAAUnMasked;
def int_riscv_vmxnor: RISCVBinaryAAAUnMasked;
// vmclr.m / vmset.m: produce an all-zeros / all-ones mask; no vector inputs.
def int_riscv_vmclr : RISCVNullaryIntrinsic;
def int_riscv_vmset : RISCVNullaryIntrinsic;

// Mask queries with a scalar (GPR) result: population count (vcpop.m) and
// index of the first set bit (vfirst.m).
defm vcpop : RISCVMaskedUnarySOut;
defm vfirst : RISCVMaskedUnarySOut;
// Mask manipulation with a mask result: set-before-first, set-only-first,
// set-including-first.
defm vmsbf : RISCVMaskedUnaryMOut;
defm vmsof : RISCVMaskedUnaryMOut;
defm vmsif : RISCVMaskedUnaryMOut;
1631
// Single-width conversions between floating point and (un)signed integers.
// The rtz_* forms always truncate (round toward zero); the others take an
// explicit rounding-mode operand.
defm vfcvt_xu_f_v : RISCVConversionRoundingMode;
defm vfcvt_x_f_v : RISCVConversionRoundingMode;
defm vfcvt_rtz_xu_f_v : RISCVConversion;
defm vfcvt_rtz_x_f_v : RISCVConversion;
defm vfcvt_f_xu_v : RISCVConversionRoundingMode;
defm vfcvt_f_x_v : RISCVConversionRoundingMode;

// Widening conversions (result is the wider type, so the int-to-float and
// float-to-float widenings are exact and need no rounding mode).
defm vfwcvt_f_xu_v : RISCVConversion;
defm vfwcvt_f_x_v : RISCVConversion;
defm vfwcvt_xu_f_v : RISCVConversionRoundingMode;
defm vfwcvt_x_f_v : RISCVConversionRoundingMode;
defm vfwcvt_rtz_xu_f_v : RISCVConversion;
defm vfwcvt_rtz_x_f_v : RISCVConversion;
defm vfwcvt_f_f_v : RISCVConversion;
defm vfwcvtbf16_f_f_v : RISCVConversion;

// Narrowing conversions; rod = round-toward-odd float narrowing.
defm vfncvt_f_xu_w : RISCVConversionRoundingMode;
defm vfncvt_f_x_w : RISCVConversionRoundingMode;
defm vfncvt_xu_f_w : RISCVConversionRoundingMode;
defm vfncvt_x_f_w : RISCVConversionRoundingMode;
defm vfncvt_rtz_xu_f_w : RISCVConversion;
defm vfncvt_rtz_x_f_w : RISCVConversion;
defm vfncvt_f_f_w : RISCVConversionRoundingMode;
defm vfncvtbf16_f_f_w : RISCVConversionRoundingMode;
defm vfncvt_rod_f_f_w : RISCVConversion;
1657
// viota.m: each result element receives the count of set mask bits at lower
// element indices (an exclusive prefix sum over the mask).
// Output: (vector)
// Input: (passthru, mask type input, vl)
def int_riscv_viota
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty],
                            [IntrNoMem]>, RISCVVIntrinsic {
  // VL is argument 2 (after passthru and the mask-typed input).
  let VLOperand = 2;
}
// Masked form of viota.m; the trailing policy operand (argument 4) selects
// tail/mask agnosticism and must be an immediate.
// Output: (vector)
// Input: (maskedoff, mask type vector_in, mask, vl, policy)
def int_riscv_viota_mask
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<1>],
                            [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
  // VL is argument 3 (after maskedoff, mask-typed input and mask).
  let VLOperand = 3;
}
// vid.v: write each element's own index into the destination.
// Output: (vector)
// Input: (passthru, vl)
def int_riscv_vid : RISCVID;
1682
// Masked form of vid.v; the trailing policy operand (argument 3) must be an
// immediate.
// Output: (vector)
// Input: (maskedoff, mask, vl, policy)
def int_riscv_vid_mask
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                             llvm_anyint_ty, LLVMMatchType<1>],
                            [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
  // VL is argument 2 (after maskedoff and mask).
  let VLOperand = 2;
}
1693
// Segment (tuple) loads/stores of scalable vectors for NF = 2..8 fields:
// unit-stride (vlseg/vsseg), unit-stride fault-only-first load (vlseg<nf>ff),
// strided (vlsseg/vssseg), and indexed ordered/unordered
// (vloxseg/vsoxseg, vluxseg/vsuxseg) variants.
foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
  defm vlseg # nf : RISCVUSSegLoad;
  defm vlseg # nf # ff : RISCVUSSegLoadFF;
  defm vlsseg # nf : RISCVSSegLoad;
  defm vloxseg # nf : RISCVISegLoad;
  defm vluxseg # nf : RISCVISegLoad;
  defm vsseg # nf : RISCVUSSegStore;
  defm vssseg # nf : RISCVSSegStore;
  defm vsoxseg # nf : RISCVISegStore;
  defm vsuxseg # nf : RISCVISegStore;
}
1705
// Segment loads/stores for fixed vectors.
foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
  // Load: returns nf vectors of the same type, reading from the pointer
  // (argument 0, not captured); the final argument is the VL.
  def int_riscv_seg # nf # _load
      : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty],
                                          !listsplat(LLVMMatchType<0>,
                                          !add(nf, -1))),
                              [llvm_anyptr_ty, llvm_anyint_ty],
                              [NoCapture<ArgIndex<0>>, IntrReadMem]>;
  // Store: takes nf vectors followed by the pointer (argument index nf,
  // not captured) and the VL; no result.
  def int_riscv_seg # nf # _store
      : DefaultAttrsIntrinsic<[],
                              !listconcat([llvm_anyvector_ty],
                                          !listsplat(LLVMMatchType<0>,
                                                     !add(nf, -1)),
                                          [llvm_anyptr_ty, llvm_anyint_ty]),
                              [NoCapture<ArgIndex<nf>>, IntrWriteMem]>;
}
1722
1723 } // TargetPrefix = "riscv"
1724
1725 //===----------------------------------------------------------------------===//
1726 // Scalar Cryptography
1727 //
1728 // These intrinsics will lower directly into the corresponding instructions
// added by the scalar cryptography extension, if the extension is present.
1730
1731 let TargetPrefix = "riscv" in {
1732
// i32 @llvm.<name>(i32, i32, i32 imm); the final operand is a byte-select
// immediate and must be a compile-time constant (ImmArg).
class ScalarCryptoByteSelect32
    : DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem, IntrSpeculatable,
                             ImmArg<ArgIndex<2>>]>;

// i32 @llvm.<name>(i32, i32)
class ScalarCryptoGprGprIntrinsic32
    : DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem, IntrSpeculatable]>;

// i64 @llvm.<name>(i64, i64)
class ScalarCryptoGprGprIntrinsic64
    : DefaultAttrsIntrinsic<[llvm_i64_ty],
                            [llvm_i64_ty, llvm_i64_ty],
                            [IntrNoMem, IntrSpeculatable]>;

// i32 @llvm.<name>(i32)
class ScalarCryptoGprIntrinsic32
    : DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty],
                            [IntrNoMem, IntrSpeculatable]>;

// i64 @llvm.<name>(i64)
class ScalarCryptoGprIntrinsic64
    : DefaultAttrsIntrinsic<[llvm_i64_ty],
                            [llvm_i64_ty],
                            [IntrNoMem, IntrSpeculatable]>;
1758
// Zknd: AES decryption instructions (32-bit byte-select and 64-bit forms).
def int_riscv_aes32dsi : ScalarCryptoByteSelect32,
                         ClangBuiltin<"__builtin_riscv_aes32dsi">;
def int_riscv_aes32dsmi : ScalarCryptoByteSelect32,
                          ClangBuiltin<"__builtin_riscv_aes32dsmi">;

def int_riscv_aes64ds : ScalarCryptoGprGprIntrinsic64,
                        ClangBuiltin<"__builtin_riscv_aes64ds">;
def int_riscv_aes64dsm : ScalarCryptoGprGprIntrinsic64,
                         ClangBuiltin<"__builtin_riscv_aes64dsm">;

def int_riscv_aes64im : ScalarCryptoGprIntrinsic64,
                        ClangBuiltin<"__builtin_riscv_aes64im">;

// Zkne: AES encryption instructions.
def int_riscv_aes32esi : ScalarCryptoByteSelect32,
                         ClangBuiltin<"__builtin_riscv_aes32esi">;
def int_riscv_aes32esmi : ScalarCryptoByteSelect32,
                          ClangBuiltin<"__builtin_riscv_aes32esmi">;

def int_riscv_aes64es : ScalarCryptoGprGprIntrinsic64,
                        ClangBuiltin<"__builtin_riscv_aes64es">;
def int_riscv_aes64esm : ScalarCryptoGprGprIntrinsic64,
                         ClangBuiltin<"__builtin_riscv_aes64esm">;

// Zknd & Zkne: AES key-schedule instructions.
def int_riscv_aes64ks2 : ScalarCryptoGprGprIntrinsic64,
                         ClangBuiltin<"__builtin_riscv_aes64ks2">;
// aes64ks1i's second operand is a round-number immediate (ImmArg).
def int_riscv_aes64ks1i : DefaultAttrsIntrinsic<[llvm_i64_ty],
                                                [llvm_i64_ty, llvm_i32_ty],
                                                [IntrNoMem, IntrSpeculatable,
                                                 ImmArg<ArgIndex<1>>]>,
                          ClangBuiltin<"__builtin_riscv_aes64ks1i">;
1792
// Zknh: SHA-2 hash-function instructions. SHA-256 forms operate on i32.
def int_riscv_sha256sig0 : ScalarCryptoGprIntrinsic32;
def int_riscv_sha256sig1 : ScalarCryptoGprIntrinsic32;
def int_riscv_sha256sum0 : ScalarCryptoGprIntrinsic32;
def int_riscv_sha256sum1 : ScalarCryptoGprIntrinsic32;

// RV32 SHA-512 forms: each 64-bit sigma/sum operation is split into i32
// low/high (l/h) or rotation (r) variants.
def int_riscv_sha512sig0l : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sig0l">;
def int_riscv_sha512sig0h : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sig0h">;
def int_riscv_sha512sig1l : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sig1l">;
def int_riscv_sha512sig1h : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sig1h">;
def int_riscv_sha512sum0r : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sum0r">;
def int_riscv_sha512sum1r : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sum1r">;

// RV64 SHA-512 forms operating on full i64 values.
def int_riscv_sha512sig0 : ScalarCryptoGprIntrinsic64,
                           ClangBuiltin<"__builtin_riscv_sha512sig0">;
def int_riscv_sha512sig1 : ScalarCryptoGprIntrinsic64,
                           ClangBuiltin<"__builtin_riscv_sha512sig1">;
def int_riscv_sha512sum0 : ScalarCryptoGprIntrinsic64,
                           ClangBuiltin<"__builtin_riscv_sha512sum0">;
def int_riscv_sha512sum1 : ScalarCryptoGprIntrinsic64,
                           ClangBuiltin<"__builtin_riscv_sha512sum1">;

// Zksed: SM4 block-cipher key-schedule (sm4ks) and encode (sm4ed) steps.
def int_riscv_sm4ks : ScalarCryptoByteSelect32;
def int_riscv_sm4ed : ScalarCryptoByteSelect32;

// Zksh: SM3 hash-function instructions.
def int_riscv_sm3p0 : ScalarCryptoGprIntrinsic32;
def int_riscv_sm3p1 : ScalarCryptoGprIntrinsic32;
1828 } // TargetPrefix = "riscv"
1829
1830 //===----------------------------------------------------------------------===//
1831 // Vector Cryptography
1832 //
1833 // These intrinsics will lower directly into the corresponding instructions
// added by the vector cryptography extension, if the extension is present.
let TargetPrefix = "riscv" in {
// Zvkb: vector cryptography bit-manipulation (and-not, byte/bit reverse,
// rotates).
defm vandn : RISCVBinaryAAX;
defm vbrev8 : RISCVUnaryAA;
defm vrev8 : RISCVUnaryAA;
defm vrol : RISCVBinaryAAX;
defm vror : RISCVBinaryAAX;

// Zvbb: basic vector bit-manipulation (bit reverse, leading/trailing zero
// count, population count, widening shift-left logical).
defm vbrev : RISCVUnaryAA;
defm vclz : RISCVUnaryAA;
defm vctz : RISCVUnaryAA;
defm vcpopv : RISCVUnaryAA;
defm vwsll : RISCVBinaryABX;

// Zvbc: vector carry-less multiply (low/high product halves).
defm vclmul : RISCVBinaryAAX;
defm vclmulh : RISCVBinaryAAX;

// Zvkg: vector GHASH (GCM) add-multiply and multiply.
def int_riscv_vghsh : RISCVBinaryAAXUnMaskedZvk;
def int_riscv_vgmul_vv : RISCVUnaryAAUnMaskedZvk<IsVS=0>;

// Zvkned: vector AES single-round encryption/decryption and key schedule.
defm vaesdf : RISCVUnaryAAUnMaskedZvk;
defm vaesdm : RISCVUnaryAAUnMaskedZvk;
defm vaesef : RISCVUnaryAAUnMaskedZvk;
defm vaesem : RISCVUnaryAAUnMaskedZvk;
// The key-schedule intrinsics take a round-number immediate (IsVI).
def int_riscv_vaeskf1 : RISCVBinaryAAXUnMasked<IsVI=1>;
def int_riscv_vaeskf2 : RISCVBinaryAAXUnMaskedZvk<IsVI=1>;
// vaesz only exists in the .vs form (HasVV=0).
defm vaesz : RISCVUnaryAAUnMaskedZvk<HasVV=0>;

// Zvknha or Zvknhb: vector SHA-2 compression (high/low) and message
// schedule.
def int_riscv_vsha2ch : RISCVBinaryAAXUnMaskedZvk;
def int_riscv_vsha2cl : RISCVBinaryAAXUnMaskedZvk;
def int_riscv_vsha2ms : RISCVBinaryAAXUnMaskedZvk;

// Zvksed: vector SM4 key expansion (immediate round) and rounds.
def int_riscv_vsm4k : RISCVBinaryAAXUnMasked<IsVI=1>;
defm vsm4r : RISCVUnaryAAUnMaskedZvk;

// Zvksh: vector SM3 compression (immediate round) and message expansion.
def int_riscv_vsm3c : RISCVBinaryAAXUnMaskedZvk<IsVI=1>;
def int_riscv_vsm3me : RISCVBinaryAAXUnMasked;
} // TargetPrefix = "riscv"
1880
1881 // Vendor extensions
1882 //===----------------------------------------------------------------------===//
1883 include "llvm/IR/IntrinsicsRISCVXTHead.td"
1884 include "llvm/IR/IntrinsicsRISCVXsf.td"
1885 include "llvm/IR/IntrinsicsRISCVXCV.td"