Back to home page

EIC code displayed by LXR

 
 

    


File indexing completed on 2025-11-03 10:06:09

0001 
0002 /*---------------------------------------------------------------*/
0003 /*--- begin                                       libvex_ir.h ---*/
0004 /*---------------------------------------------------------------*/
0005 
0006 /*
0007    This file is part of Valgrind, a dynamic binary instrumentation
0008    framework.
0009 
0010    Copyright (C) 2004-2017 OpenWorks LLP
0011       info@open-works.net
0012 
0013    This program is free software; you can redistribute it and/or
0014    modify it under the terms of the GNU General Public License as
0015    published by the Free Software Foundation; either version 2 of the
0016    License, or (at your option) any later version.
0017 
0018    This program is distributed in the hope that it will be useful, but
0019    WITHOUT ANY WARRANTY; without even the implied warranty of
0020    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
0021    General Public License for more details.
0022 
0023    You should have received a copy of the GNU General Public License
0024    along with this program; if not, see <http://www.gnu.org/licenses/>.
0025 
0026    The GNU General Public License is contained in the file COPYING.
0027 
0028    Neither the names of the U.S. Department of Energy nor the
0029    University of California nor the names of its contributors may be
0030    used to endorse or promote products derived from this software
0031    without prior written permission.
0032 */
0033 
0034 #ifndef __LIBVEX_IR_H
0035 #define __LIBVEX_IR_H
0036 
0037 #include "libvex_basictypes.h"
0038 
0039    
0040 /*---------------------------------------------------------------*/
0041 /*--- High-level IR description                               ---*/
0042 /*---------------------------------------------------------------*/
0043 
0044 /* Vex IR is an architecture-neutral intermediate representation.
0045    Unlike some IRs in systems similar to Vex, it is not like assembly
0046    language (ie. a list of instructions).  Rather, it is more like the
0047    IR that might be used in a compiler.
0048 
0049    Code blocks
0050    ~~~~~~~~~~~
0051    The code is broken into small code blocks ("superblocks", type:
0052    'IRSB').  Each code block typically represents from 1 to perhaps 50
0053    instructions.  IRSBs are single-entry, multiple-exit code blocks.
0054    Each IRSB contains three things:
0055    - a type environment, which indicates the type of each temporary
0056      value present in the IRSB
0057    - a list of statements, which represent code
0058    - a jump that exits from the end the IRSB
0059    Because the blocks are multiple-exit, there can be additional
0060    conditional exit statements that cause control to leave the IRSB
0061    before the final exit.  Also because of this, IRSBs can cover
0062    multiple non-consecutive sequences of code (up to 3).  These are
0063    recorded in the type VexGuestExtents (see libvex.h).
0064 
0065    Statements and expressions
0066    ~~~~~~~~~~~~~~~~~~~~~~~~~~
0067    Statements (type 'IRStmt') represent operations with side-effects,
0068    eg.  guest register writes, stores, and assignments to temporaries.
0069    Expressions (type 'IRExpr') represent operations without
0070    side-effects, eg. arithmetic operations, loads, constants.
0071    Expressions can contain sub-expressions, forming expression trees,
   eg. (3 + (4 * load(addr1))).
0073 
0074    Storage of guest state
0075    ~~~~~~~~~~~~~~~~~~~~~~
0076    The "guest state" contains the guest registers of the guest machine
0077    (ie.  the machine that we are simulating).  It is stored by default
0078    in a block of memory supplied by the user of the VEX library,
0079    generally referred to as the guest state (area).  To operate on
0080    these registers, one must first read ("Get") them from the guest
0081    state into a temporary value.  Afterwards, one can write ("Put")
0082    them back into the guest state.
0083 
0084    Get and Put are characterised by a byte offset into the guest
0085    state, a small integer which effectively gives the identity of the
0086    referenced guest register, and a type, which indicates the size of
0087    the value to be transferred.
0088 
0089    The basic "Get" and "Put" operations are sufficient to model normal
0090    fixed registers on the guest.  Selected areas of the guest state
0091    can be treated as a circular array of registers (type:
0092    'IRRegArray'), which can be indexed at run-time.  This is done with
0093    the "GetI" and "PutI" primitives.  This is necessary to describe
0094    rotating register files, for example the x87 FPU stack, SPARC
0095    register windows, and the Itanium register files.
0096 
0097    Examples, and flattened vs. unflattened code
0098    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
0099    For example, consider this x86 instruction:
0100      
0101      addl %eax, %ebx
0102 
0103    One Vex IR translation for this code would be this:
0104 
0105      ------ IMark(0x24F275, 7, 0) ------
0106      t3 = GET:I32(0)             # get %eax, a 32-bit integer
0107      t2 = GET:I32(12)            # get %ebx, a 32-bit integer
0108      t1 = Add32(t3,t2)           # addl
0109      PUT(0) = t1                 # put %eax
0110 
0111    (For simplicity, this ignores the effects on the condition codes, and
0112    the update of the instruction pointer.)
0113 
0114    The "IMark" is an IR statement that doesn't represent actual code.
0115    Instead it indicates the address and length of the original
0116    instruction.  The numbers 0 and 12 are offsets into the guest state
0117    for %eax and %ebx.  The full list of offsets for an architecture
0118    <ARCH> can be found in the type VexGuest<ARCH>State in the file
0119    VEX/pub/libvex_guest_<ARCH>.h.
0120 
0121    The five statements in this example are:
0122    - the IMark
0123    - three assignments to temporaries
0124    - one register write (put)
0125 
0126    The six expressions in this example are:
0127    - two register reads (gets)
0128    - one arithmetic (add) operation
0129    - three temporaries (two nested within the Add32, one in the PUT)
0130 
0131    The above IR is "flattened", ie. all sub-expressions are "atoms",
0132    either constants or temporaries.  An equivalent, unflattened version
0133    would be:
0134    
0135      PUT(0) = Add32(GET:I32(0), GET:I32(12))
0136 
0137    IR is guaranteed to be flattened at instrumentation-time.  This makes
0138    instrumentation easier.  Equivalent flattened and unflattened IR
0139    typically results in the same generated code.
0140 
0141    Another example, this one showing loads and stores:
0142 
0143      addl %edx,4(%eax)
0144 
0145    This becomes (again ignoring condition code and instruction pointer
0146    updates):
0147 
0148      ------ IMark(0x4000ABA, 3, 0) ------
0149      t3 = Add32(GET:I32(0),0x4:I32)
0150      t2 = LDle:I32(t3)
0151      t1 = GET:I32(8)
0152      t0 = Add32(t2,t1)
0153      STle(t3) = t0
0154 
0155    The "le" in "LDle" and "STle" is short for "little-endian".
0156 
0157    No need for deallocations
0158    ~~~~~~~~~~~~~~~~~~~~~~~~~
0159    Although there are allocation functions for various data structures
0160    in this file, there are no deallocation functions.  This is because
0161    Vex uses a memory allocation scheme that automatically reclaims the
0162    memory used by allocated structures once translation is completed.
   This makes things easier for tools that instrument/transform code
   blocks.
0165 
0166    SSAness and typing
0167    ~~~~~~~~~~~~~~~~~~
0168    The IR is fully typed.  For every IRSB (IR block) it is possible to
0169    say unambiguously whether or not it is correctly typed.
0170    Incorrectly typed IR has no meaning and the VEX will refuse to
0171    process it.  At various points during processing VEX typechecks the
0172    IR and aborts if any violations are found.  This seems overkill but
0173    makes it a great deal easier to build a reliable JIT.
0174 
0175    IR also has the SSA property.  SSA stands for Static Single
0176    Assignment, and what it means is that each IR temporary may be
0177    assigned to only once.  This idea became widely used in compiler
0178    construction in the mid to late 90s.  It makes many IR-level
0179    transformations/code improvements easier, simpler and faster.
0180    Whenever it typechecks an IR block, VEX also checks the SSA
0181    property holds, and will abort if not so.  So SSAness is
0182    mechanically and rigidly enforced.
0183 */
0184 
0185 /*---------------------------------------------------------------*/
0186 /*--- Type definitions for the IR                             ---*/
0187 /*---------------------------------------------------------------*/
0188 
0189 /* General comments about naming schemes:
0190 
0191    All publically visible functions contain the name of the primary
0192    type on which they operate (IRFoo, IRBar, etc).  Hence you should
0193    be able to identify these functions by grepping for "IR[A-Z]".
0194 
0195    For some type 'IRFoo':
0196 
0197    - ppIRFoo is the printing method for IRFoo, printing it to the
0198      output channel specified in the LibVEX_Initialise call.
0199 
0200    - eqIRFoo is a structural equality predicate for IRFoos.
0201 
0202    - deepCopyIRFoo is a deep copy constructor for IRFoos. 
0203      It recursively traverses the entire argument tree and
0204      produces a complete new tree.  All types have a deep copy
0205      constructor.
0206 
0207    - shallowCopyIRFoo is the shallow copy constructor for IRFoos.
0208      It creates a new top-level copy of the supplied object,
0209      but does not copy any sub-objects.  Only some types have a
0210      shallow copy constructor.
0211 */
0212 
0213 /* ------------------ Types ------------------ */
0214 
/* A type indicates the size of a value, and whether it's an integer, a
   float, or a vector (SIMD) value. */
typedef 
   enum { 
      Ity_INVALID=0x1100, /* sentinel, never a valid type.  The 0x1100 base
                             keeps IRType values disjoint from the other IR
                             enums below (0x1200, 0x1300, ...), so that an
                             enum mixup produces a recognisably bad value. */
      Ity_I1,    /* 1-bit value; written Ity_Bit in primop comments below */
      Ity_I8,    /* 8-bit integer */
      Ity_I16,   /* 16-bit integer */
      Ity_I32,   /* 32-bit integer */
      Ity_I64,   /* 64-bit integer */
      Ity_I128,  /* 128-bit scalar */
      Ity_F16,   /* 16 bit float */
      Ity_F32,   /* IEEE 754 float */
      Ity_F64,   /* IEEE 754 double */
      Ity_D32,   /* 32-bit Decimal floating point */
      Ity_D64,   /* 64-bit Decimal floating point */
      Ity_D128,  /* 128-bit Decimal floating point */
      Ity_F128,  /* 128-bit floating point; implementation defined */
      Ity_V128,  /* 128-bit SIMD */
      Ity_V256   /* 256-bit SIMD */
   }
   IRType;
0237 
/* Pretty-print an IRType, to the output channel specified in the
   LibVEX_Initialise call. */
extern void ppIRType ( IRType );

/* Get the size (in bytes) of an IRType, e.g. Ity_I32 -> 4. */ 
extern Int sizeofIRType ( IRType );

/* Translate 1/2/4/8 into Ity_I{8,16,32,64} respectively.  Asserts on
   any other input. */
extern IRType integerIRTypeOfSize ( Int szB );
0247 
0248 
0249 /* ------------------ Endianness ------------------ */
0250 
/* IREndness is used in load IRExprs and store IRStmts, to say whether
   the access is little- or big-endian (cf. the "le" in "LDle"/"STle"
   in the examples above). */
typedef
   enum { 
      Iend_LE=0x1200, /* little endian */
      Iend_BE          /* big endian */
   }
   IREndness;
0258 
0259 
0260 /* ------------------ Constants ------------------ */
0261 
0262 /* IRConsts are used within 'Const' and 'Exit' IRExprs. */
0263 
/* The various kinds of constant.  Each tag Ico_X selects the union
   field X of IRConst below. */
typedef
   enum { 
      Ico_U1=0x1300, /* 1-bit integer (a Bool) */
      Ico_U8,    /* 8-bit unsigned integer */
      Ico_U16,   /* 16-bit unsigned integer */
      Ico_U32,   /* 32-bit unsigned integer */
      Ico_U64,   /* 64-bit unsigned integer */
      Ico_U128,  /* 128-bit restricted integer constant,
                    same encoding scheme as V128 */
      Ico_F32,   /* 32-bit IEEE754 floating */
      Ico_F32i,  /* 32-bit unsigned int to be interpreted literally
                    as an IEEE754 single value. */
      Ico_F64,   /* 64-bit IEEE754 floating */
      Ico_F64i,  /* 64-bit unsigned int to be interpreted literally
                    as an IEEE754 double value. */
      Ico_V128,  /* 128-bit restricted vector constant, with 1 bit
                    (repeated 8 times) for each of the 16 x 1-byte lanes */
      Ico_V256   /* 256-bit restricted vector constant, with 1 bit
                    (repeated 8 times) for each of the 32 x 1-byte lanes */
   }
   IRConstTag;
0286 
/* A constant.  Stored as a tagged union.  'tag' indicates what kind of
   constant this is.  'Ico' is the union that holds the fields.  If an
   IRConst 'c' has c.tag equal to Ico_U32, then it's a 32-bit constant,
   and its value can be accessed with 'c.Ico.U32'. */
typedef
   struct _IRConst {
      IRConstTag tag;        /* selects which union field below is valid */
      union {
         Bool   U1;
         UChar  U8;
         UShort U16;
         UInt   U32;
         ULong  U64;
         UShort U128;   /* restricted encoding; see Ico_U128 comment above */
         Float  F32;
         UInt   F32i;
         Double F64;
         ULong  F64i;
         UShort V128;   /* 16-bit value; see Ico_V128 comment above */
         UInt   V256;   /* 32-bit value; see Ico_V256 comment above */
      } Ico;
   }
   IRConst;
0310 
/* IRConst constructors.  Each allocates a fresh IRConst carrying the
   corresponding tag and value.  Per the "No need for deallocations"
   note above, the caller must not attempt to free the result. */
extern IRConst* IRConst_U1   ( Bool );
extern IRConst* IRConst_U8   ( UChar );
extern IRConst* IRConst_U16  ( UShort );
extern IRConst* IRConst_U32  ( UInt );
extern IRConst* IRConst_U64  ( ULong );
extern IRConst* IRConst_U128 ( UShort );
extern IRConst* IRConst_F32  ( Float );
extern IRConst* IRConst_F32i ( UInt );
extern IRConst* IRConst_F64  ( Double );
extern IRConst* IRConst_F64i ( ULong );
extern IRConst* IRConst_V128 ( UShort );
extern IRConst* IRConst_V256 ( UInt );

/* Deep-copy an IRConst */
extern IRConst* deepCopyIRConst ( const IRConst* );

/* Pretty-print an IRConst */
extern void ppIRConst ( const IRConst* );

/* Compare two IRConsts for structural equality */
extern Bool eqIRConst ( const IRConst*, const IRConst* );
0333 
0334 
0335 /* ------------------ Call targets ------------------ */
0336 
0337 /* Describes a helper function to call.  The name part is purely for
0338    pretty printing and not actually used.  regparms=n tells the back
0339    end that the callee has been declared
0340    "__attribute__((regparm(n)))", although indirectly using the
0341    VEX_REGPARM(n) macro.  On some targets (x86) the back end will need
0342    to construct a non-standard sequence to call a function declared
0343    like this.
0344 
0345    mcx_mask is a sop to Memcheck.  It indicates which args should be
0346    considered 'always defined' when lazily computing definedness of
0347    the result.  Bit 0 of mcx_mask corresponds to args[0], bit 1 to
0348    args[1], etc.  If a bit is set, the corresponding arg is excluded
0349    (hence "x" in "mcx") from definedness checking.  
0350 */
0351 
typedef
   struct {
      Int          regparms;  /* number of args passed in registers, as per
                                 the VEX_REGPARM note above */
      const HChar* name;      /* helper's name; for pretty printing only */
      void*        addr;      /* address of the helper function to call */
      UInt         mcx_mask;  /* bitmask of args excluded from Memcheck
                                 definedness checking; bit i covers args[i]
                                 (see comment above) */
   }
   IRCallee;

/* Create an IRCallee.  NOTE(review): presumably mcx_mask starts at zero
   (no args excluded) -- confirm against the definition in ir_defs.c. */
extern IRCallee* mkIRCallee ( Int regparms, const HChar* name, void* addr );

/* Deep-copy an IRCallee. */
extern IRCallee* deepCopyIRCallee ( const IRCallee* );

/* Pretty-print an IRCallee. */
extern void ppIRCallee ( const IRCallee* );
0369 
0370 
0371 /* ------------------ Guest state arrays ------------------ */
0372 
/* This describes a section of the guest state that we want to
   be able to index at run time, so as to be able to describe 
   indexed or rotating register files on the guest (used by the
   "GetI" and "PutI" primitives discussed above). */
typedef
   struct {
      Int    base;   /* guest state offset of start of indexed area */
      IRType elemTy; /* type of each element in the indexed area */
      Int    nElems; /* number of elements in the indexed area */
   }
   IRRegArray;

/* Create an IRRegArray from (base, elemTy, nElems). */
extern IRRegArray* mkIRRegArray ( Int, IRType, Int );

/* Deep-copy an IRRegArray. */
extern IRRegArray* deepCopyIRRegArray ( const IRRegArray* );

/* Pretty-print an IRRegArray. */
extern void ppIRRegArray ( const IRRegArray* );
/* Compare two IRRegArrays for structural equality. */
extern Bool eqIRRegArray ( const IRRegArray*, const IRRegArray* );
0390 
0391 
0392 /* ------------------ Temporaries ------------------ */
0393 
/* This represents a temporary, eg. t1.  The IR optimiser relies on the
   fact that IRTemps are 32-bit ints.  Do not change them to be ints of
   any other size. */
typedef UInt IRTemp;

/* Pretty-print an IRTemp. */
extern void ppIRTemp ( IRTemp );

/* Sentinel denoting an invalid/absent temporary. */
#define IRTemp_INVALID ((IRTemp)0xFFFFFFFF)
0403 
0404 
0405 /* --------------- Primops (arity 1,2,3 and 4) --------------- */
0406 
0407 /* Primitive operations that are used in Unop, Binop, Triop and Qop
0408    IRExprs.  Once we take into account integer, floating point and SIMD
0409    operations of all the different sizes, there are quite a lot of them.
0410    Most instructions supported by the architectures that Vex supports
0411    (x86, PPC, etc) are represented.  Some more obscure ones (eg. cpuid)
0412    are not;  they are instead handled with dirty helpers that emulate
0413    their functionality.  Such obscure ones are thus not directly visible
0414    in the IR, but their effects on guest state (memory and registers) 
0415    are made visible via the annotations in IRDirty structures.
0416 
0417    2018-Dec-27: some of int<->fp conversion operations have been renamed so as
0418    to have a trailing _DEP, meaning "deprecated".  This is because they don't
0419    specify a rounding mode to be used for the conversion and so are
0420    underspecified.  Their use should be replaced with equivalents that do
0421    specify a rounding mode, either as a first argument or using a suffix on the
0422    name, that indicates the rounding mode to use.
0423 */
0424 typedef
0425    enum { 
      /* -- Do not change this ordering.  The IR generators rely on
            (eg) Iop_Add64 == Iop_Add8 + 3. -- */
0428 
0429       Iop_INVALID=0x1400,
0430       Iop_Add8,  Iop_Add16,  Iop_Add32,  Iop_Add64,
0431       Iop_Sub8,  Iop_Sub16,  Iop_Sub32,  Iop_Sub64,
0432       /* Signless mul.  MullS/MullU is elsewhere. */
0433       Iop_Mul8,  Iop_Mul16,  Iop_Mul32,  Iop_Mul64,
0434       Iop_Or8,   Iop_Or16,   Iop_Or32,   Iop_Or64,
0435       Iop_And8,  Iop_And16,  Iop_And32,  Iop_And64,
0436       Iop_Xor8,  Iop_Xor16,  Iop_Xor32,  Iop_Xor64,
0437       Iop_Shl8,  Iop_Shl16,  Iop_Shl32,  Iop_Shl64,
0438       Iop_Shr8,  Iop_Shr16,  Iop_Shr32,  Iop_Shr64,
0439       Iop_Sar8,  Iop_Sar16,  Iop_Sar32,  Iop_Sar64,
0440       /* Integer comparisons. */
0441       Iop_CmpEQ8,  Iop_CmpEQ16,  Iop_CmpEQ32,  Iop_CmpEQ64,
0442       Iop_CmpNE8,  Iop_CmpNE16,  Iop_CmpNE32,  Iop_CmpNE64,
0443       /* Tags for unary ops */
0444       Iop_Not8,  Iop_Not16,  Iop_Not32,  Iop_Not64,
0445 
0446       /* Exactly like CmpEQ8/16/32/64, but carrying the additional
0447          hint that these compute the success/failure of a CAS
0448          operation, and hence are almost certainly applied to two
0449          copies of the same value, which in turn has implications for
0450          Memcheck's instrumentation. */
0451       Iop_CasCmpEQ8, Iop_CasCmpEQ16, Iop_CasCmpEQ32, Iop_CasCmpEQ64,
0452       Iop_CasCmpNE8, Iop_CasCmpNE16, Iop_CasCmpNE32, Iop_CasCmpNE64,
0453 
0454       /* Exactly like CmpNE8/16/32/64, but carrying the additional
0455          hint that these needs expensive definedness tracking. */
0456       Iop_ExpCmpNE8, Iop_ExpCmpNE16, Iop_ExpCmpNE32, Iop_ExpCmpNE64,
0457 
0458       /* -- Ordering not important after here. -- */
0459 
0460       /* Widening multiplies */
0461       Iop_MullS8, Iop_MullS16, Iop_MullS32, Iop_MullS64,
0462       Iop_MullU8, Iop_MullU16, Iop_MullU32, Iop_MullU64,
0463 
0464       /* Counting bits */
0465       /* Ctz64/Ctz32/Clz64/Clz32 are UNDEFINED when given arguments of zero.
0466          You must ensure they are never given a zero argument.  As of
0467          2018-Nov-14 they are deprecated.  Try to use the Nat variants
0468          immediately below, if you can.
0469       */
0470       Iop_Clz64, Iop_Clz32,   /* count leading zeroes */
0471       Iop_Ctz64, Iop_Ctz32,   /* count trailing zeros */
0472       /* Count leading/trailing zeroes, with "natural" semantics for the
0473          case where the input is zero: then the result is the number of bits
0474          in the word. */
0475       Iop_ClzNat64, Iop_ClzNat32,
0476       Iop_CtzNat64, Iop_CtzNat32,
0477       /* Population count -- compute the number of 1 bits in the argument. */
0478       Iop_PopCount64, Iop_PopCount32,
0479 
0480       /* Standard integer comparisons */
0481       Iop_CmpLT32S, Iop_CmpLT64S,
0482       Iop_CmpLE32S, Iop_CmpLE64S,
0483       Iop_CmpLT32U, Iop_CmpLT64U,
0484       Iop_CmpLE32U, Iop_CmpLE64U,
0485 
0486       /* As a sop to Valgrind-Memcheck, the following are useful. */
0487       Iop_CmpNEZ8, Iop_CmpNEZ16,  Iop_CmpNEZ32,  Iop_CmpNEZ64,
      Iop_CmpwNEZ32, Iop_CmpwNEZ64, /* all-0s -> all-0s; other -> all-1s */
0489       Iop_Left8, Iop_Left16, Iop_Left32, Iop_Left64, /*  \x -> x | -x */
0490       Iop_Max32U, /* unsigned max */
0491 
0492       /* PowerPC-style 3-way integer comparisons.  Without them it is
0493          difficult to simulate PPC efficiently.
0494          op(x,y) | x < y  = 0x8 else 
0495                  | x > y  = 0x4 else
0496                  | x == y = 0x2
0497       */
0498       Iop_CmpORD32U, Iop_CmpORD64U,
0499       Iop_CmpORD32S, Iop_CmpORD64S,
0500 
0501       /* Division */
0502       /* TODO: clarify semantics wrt rounding, negative values, whatever */
0503       Iop_DivU32,   // :: I32,I32 -> I32 (simple div, no mod)
0504       Iop_DivS32,   // ditto, signed
0505       Iop_DivU64,   // :: I64,I64 -> I64 (simple div, no mod)
0506       Iop_DivS64,   // ditto, signed
0507       Iop_DivU128,   // :: I128,I128 -> I128 (simple div, no mod)
0508       Iop_DivS128,   // ditto, signed
0509 
0510       Iop_DivU32E,  // :: I32,I32 -> I32 (dividend is 32-bit arg (hi)
0511                     // concat with 32 0's (low))
0512       Iop_DivS32E,  // ditto, signed
0513       Iop_DivU64E,  // :: I64,I64 -> I64 (dividend is 64-bit arg (hi)
0514                     //                    concat with 64 0's (low))
0515       Iop_DivS64E,  // ditto, signed
0516       Iop_DivU128E, // :: I128,I128 -> I128 (dividend is 128-bit arg (hi)
0517                     //                    concat with 128 0's (low))
0518       Iop_DivS128E, // ditto, signed
0519 
0520       Iop_DivModU64to32, // :: I64,I32 -> I64
0521                          // of which lo half is div and hi half is mod
0522       Iop_DivModS64to32, // ditto, signed
0523 
0524       Iop_DivModU128to64, // :: V128,I64 -> V128
0525                           // of which lo half is div and hi half is mod
0526       Iop_DivModS128to64, // ditto, signed
0527 
0528       Iop_DivModS64to64, // :: I64,I64 -> I128
0529                          // of which lo half is div and hi half is mod
0530       Iop_DivModU64to64, // :: I64,I64 -> I128
0531                          // of which lo half is div and hi half is mod
0532       Iop_DivModS32to32, // :: I32,I32 -> I64
0533                          // of which lo half is div and hi half is mod
0534       Iop_DivModU32to32, // :: I32,I32 -> I64
0535                          // of which lo half is div and hi half is mod
0536 
0537       Iop_ModU128,     // :: I128,I128 -> I128  normal modulo operation
0538       Iop_ModS128,     // ditto, signed
0539 
0540       /* Integer conversions.  Some of these are redundant (eg
0541          Iop_64to8 is the same as Iop_64to32 and then Iop_32to8), but
0542          having a complete set reduces the typical dynamic size of IR
0543          and makes the instruction selectors easier to write. */
0544 
0545       /* Widening conversions */
0546       Iop_8Uto16, Iop_8Uto32,  Iop_8Uto64,
0547                   Iop_16Uto32, Iop_16Uto64,
0548                                Iop_32Uto64,
0549       Iop_8Sto16, Iop_8Sto32,  Iop_8Sto64,
0550                   Iop_16Sto32, Iop_16Sto64,
0551                                Iop_32Sto64,
0552 
0553       /* Narrowing conversions */
0554       Iop_64to8, Iop_32to8, Iop_64to16,
0555       /* 8 <-> 16 bit conversions */
0556       Iop_16to8,      // :: I16 -> I8, low half
0557       Iop_16HIto8,    // :: I16 -> I8, high half
0558       Iop_8HLto16,    // :: (I8,I8) -> I16
0559       /* 16 <-> 32 bit conversions */
0560       Iop_32to16,     // :: I32 -> I16, low half
0561       Iop_32HIto16,   // :: I32 -> I16, high half
0562       Iop_16HLto32,   // :: (I16,I16) -> I32
0563       /* 32 <-> 64 bit conversions */
0564       Iop_64to32,     // :: I64 -> I32, low half
0565       Iop_64HIto32,   // :: I64 -> I32, high half
0566       Iop_32HLto64,   // :: (I32,I32) -> I64
0567       /* 64 <-> 128 bit conversions */
0568       Iop_128to64,    // :: I128 -> I64, low half
0569       Iop_128HIto64,  // :: I128 -> I64, high half
0570       Iop_64HLto128,  // :: (I64,I64) -> I128
0571       /* 1-bit stuff */
0572       Iop_Not1,   /* :: Ity_Bit -> Ity_Bit */
0573       Iop_And1,   /* :: (Ity_Bit, Ity_Bit) -> Ity_Bit.  Evaluates both args! */
0574       Iop_Or1,    /* :: (Ity_Bit, Ity_Bit) -> Ity_Bit.  Evaluates both args! */
0575       Iop_32to1,  /* :: Ity_I32 -> Ity_Bit, just select bit[0] */
0576       Iop_64to1,  /* :: Ity_I64 -> Ity_Bit, just select bit[0] */
0577       Iop_1Uto8,  /* :: Ity_Bit -> Ity_I8,  unsigned widen */
0578       Iop_1Uto32, /* :: Ity_Bit -> Ity_I32, unsigned widen */
0579       Iop_1Uto64, /* :: Ity_Bit -> Ity_I64, unsigned widen */
0580       Iop_1Sto8,  /* :: Ity_Bit -> Ity_I8,  signed widen */
0581       Iop_1Sto16, /* :: Ity_Bit -> Ity_I16, signed widen */
0582       Iop_1Sto32, /* :: Ity_Bit -> Ity_I32, signed widen */
0583       Iop_1Sto64, /* :: Ity_Bit -> Ity_I64, signed widen */
0584 
0585       /* ------ Floating point.  We try to be IEEE754 compliant. ------ */
0586 
0587       /* --- Simple stuff as mandated by 754. --- */
0588 
0589       /* Binary operations, with rounding. */
0590       /* :: IRRoundingMode(I32) x F64 x F64 -> F64 */ 
0591       Iop_AddF64, Iop_SubF64, Iop_MulF64, Iop_DivF64,
0592 
0593       /* :: IRRoundingMode(I32) x F32 x F32 -> F32 */ 
0594       Iop_AddF32, Iop_SubF32, Iop_MulF32, Iop_DivF32,
0595 
0596       /* Variants of the above which produce a 64-bit result but which
0597          round their result to a IEEE float range first. */
0598       /* :: IRRoundingMode(I32) x F64 x F64 -> F64 */ 
0599       Iop_AddF64r32, Iop_SubF64r32, Iop_MulF64r32, Iop_DivF64r32, 
0600 
0601       /* Unary operations, without rounding. */
0602       /* :: F64 -> F64 */
0603       Iop_NegF64, Iop_AbsF64,
0604 
0605       /* :: F32 -> F32 */
0606       Iop_NegF32, Iop_AbsF32,
0607 
0608       /* :: F16 -> F16 */
0609       Iop_NegF16, Iop_AbsF16,
0610 
0611       /* Unary operations, with rounding. */
0612       /* :: IRRoundingMode(I32) x F64 -> F64 */
0613       Iop_SqrtF64,
0614 
0615       /* :: IRRoundingMode(I32) x F32 -> F32 */
0616       Iop_SqrtF32,
0617 
0618       /* :: IRRoundingMode(I32) x F16 -> F16 */
0619       Iop_SqrtF16,
0620 
0621       /* :: IRRoundingMode(I32) x F16 x F16 -> F16 */
0622       Iop_SubF16, Iop_AddF16,
0623 
0624       /* Comparison, yielding GT/LT/EQ/UN(ordered), as per the following:
0625             0x45 Unordered
0626             0x01 LT
0627             0x00 GT
0628             0x40 EQ
0629          This just happens to be the Intel encoding.  The values
0630          are recorded in the type IRCmpF64Result.
0631       */
0632       /* :: F64 x F64 -> IRCmpF64Result(I32) */
0633       Iop_CmpF64,
0634       Iop_CmpF32,
0635       Iop_CmpF16,
0636       Iop_CmpF128,
0637 
0638       /* --- Int to/from FP conversions. --- */
0639 
0640       /* For the most part, these take a first argument :: Ity_I32 (as
0641          IRRoundingMode) which is an indication of the rounding mode
0642          to use, as per the following encoding ("the standard
0643          encoding"):
0644             00b  to nearest (the default)
0645             01b  to -infinity
0646             10b  to +infinity
0647             11b  to zero
0648          This just happens to be the Intel encoding.  For reference only,
0649          the PPC encoding is:
0650             00b  to nearest (the default)
0651             01b  to zero
0652             10b  to +infinity
0653             11b  to -infinity
0654          Any PPC -> IR front end will have to translate these PPC
0655          encodings, as encoded in the guest state, to the standard
0656          encodings, to pass to the primops.
0657          For reference only, the ARM VFP encoding is:
0658             00b  to nearest
0659             01b  to +infinity
0660             10b  to -infinity
0661             11b  to zero
0662          Again, this will have to be converted to the standard encoding
0663          to pass to primops.
0664 
0665          If one of these conversions gets an out-of-range condition,
0666          or a NaN, as an argument, the result is host-defined.  On x86
0667          the "integer indefinite" value 0x80..00 is produced.  On PPC
0668          it is either 0x80..00 or 0x7F..FF depending on the sign of
0669          the argument.
0670 
0671          On ARMvfp, when converting to a signed integer result, the
0672          overflow result is 0x80..00 for negative args and 0x7F..FF
0673          for positive args.  For unsigned integer results it is
0674          0x00..00 and 0xFF..FF respectively.
0675 
0676          Rounding is required whenever the destination type cannot
0677          represent exactly all values of the source type.
0678       */
0679       Iop_F64toI16S, /* IRRoundingMode(I32) x F64 -> signed I16 */
0680       Iop_F64toI32S, /* IRRoundingMode(I32) x F64 -> signed I32 */
0681       Iop_F64toI64S, /* IRRoundingMode(I32) x F64 -> signed I64 */
0682       Iop_F64toI64U, /* IRRoundingMode(I32) x F64 -> unsigned I64 */
0683 
0684       Iop_F64toI32U, /* IRRoundingMode(I32) x F64 -> unsigned I32 */
0685 
0686       Iop_I32StoF64, /*                       signed I32 -> F64 */
0687       Iop_I64StoF64, /* IRRoundingMode(I32) x signed I64 -> F64 */
0688       Iop_I64UtoF64, /* IRRoundingMode(I32) x unsigned I64 -> F64 */
0689       Iop_I64UtoF32, /* IRRoundingMode(I32) x unsigned I64 -> F32 */
0690 
0691       Iop_I32UtoF32, /* IRRoundingMode(I32) x unsigned I32 -> F32 */
0692       Iop_I32UtoF64, /*                       unsigned I32 -> F64 */
0693 
0694       Iop_F32toI32S, /* IRRoundingMode(I32) x F32 -> signed I32 */
0695       Iop_F32toI64S, /* IRRoundingMode(I32) x F32 -> signed I64 */
0696       Iop_F32toI32U, /* IRRoundingMode(I32) x F32 -> unsigned I32 */
0697       Iop_F32toI64U, /* IRRoundingMode(I32) x F32 -> unsigned I64 */
0698 
0699       Iop_I32StoF32, /* IRRoundingMode(I32) x signed I32 -> F32 */
0700       Iop_I64StoF32, /* IRRoundingMode(I32) x signed I64 -> F32 */
0701 
0702       /* Conversion between floating point formats */
0703       Iop_F32toF64,  /*                       F32 -> F64 */
0704       Iop_F64toF32,  /* IRRoundingMode(I32) x F64 -> F32 */
0705 
0706       /* Reinterpretation.  Take an F32/64/128 and produce an I32/64/128
0707          with the same bit pattern, or vice versa. */
0708       Iop_ReinterpV128asI128, Iop_ReinterpI128asV128,
0709       Iop_ReinterpF128asI128, Iop_ReinterpI128asF128,
0710       Iop_ReinterpF64asI64, Iop_ReinterpI64asF64,
0711       Iop_ReinterpF32asI32, Iop_ReinterpI32asF32,
0712 
0713       /* Support for 128-bit floating point */
0714       Iop_F64HLtoF128,/* (high half of F128,low half of F128) -> F128 */
0715       Iop_F128HItoF64,/* F128 -> high half of F128 into a F64 register */
0716       Iop_F128LOtoF64,/* F128 -> low  half of F128 into a F64 register */
0717 
0718       /* :: IRRoundingMode(I32) x F128 x F128 -> F128 */
0719       Iop_AddF128, Iop_SubF128, Iop_MulF128, Iop_DivF128,
0720       Iop_MAddF128,    // (A * B) + C
0721       Iop_MSubF128,    // (A * B) - C
0722       Iop_NegMAddF128, // -((A * B) + C)
0723       Iop_NegMSubF128, // -((A * B) - C)
0724 
0725       /* :: F128 -> F128 */
0726       Iop_NegF128, Iop_AbsF128,
0727 
0728       /* :: IRRoundingMode(I32) x F128 -> F128 */
0729       Iop_SqrtF128,
0730 
0731       Iop_I32StoF128, /*                signed I32  -> F128 */
0732       Iop_I64StoF128, /*                signed I64  -> F128 */
0733       Iop_I32UtoF128, /*              unsigned I32  -> F128 */
0734       Iop_I64UtoF128, /*              unsigned I64  -> F128 */
0735       Iop_F32toF128,  /*                       F32  -> F128 */
0736       Iop_F64toF128,  /*                       F64  -> F128 */
0737       Iop_I128UtoF128, /*             unsigned I128 -> F128 */
0738       Iop_I128StoF128, /*               signed I128 -> F128 */
0739 
0740       Iop_F128toI32S, /* IRRoundingMode(I32) x F128 -> signed I32  */
0741       Iop_F128toI64S, /* IRRoundingMode(I32) x F128 -> signed I64  */
0742       Iop_F128toI32U, /* IRRoundingMode(I32) x F128 -> unsigned I32  */
0743       Iop_F128toI64U, /* IRRoundingMode(I32) x F128 -> unsigned I64  */
0744       Iop_F128toI128S,/* IRRoundingMode(I32) x F128 -> signed I128 */
0745       Iop_F128toF64,  /* IRRoundingMode(I32) x F128 -> F64         */
0746       Iop_F128toF32,  /* IRRoundingMode(I32) x F128 -> F32         */
0747       Iop_RndF128,    /* IRRoundingMode(I32) x F128 -> F128         */
0748 
0749       /* Truncate to the specified value; source and result
0750        * are stored in an F128 register.
0751        */
0752       Iop_TruncF128toI32S,  /* truncate F128 -> I32         */
0753       Iop_TruncF128toI32U,  /* truncate F128 -> I32         */
0754       Iop_TruncF128toI64U,  /* truncate F128 -> I64         */
0755       Iop_TruncF128toI64S,  /* truncate F128 -> I64         */
0756       Iop_TruncF128toI128U, /* truncate F128 -> I128        */
0757       Iop_TruncF128toI128S, /* truncate F128 -> I128        */
0758 
0759       /* --- guest x86/amd64 specifics, not mandated by 754. --- */
0760 
0761       /* Binary ops, with rounding. */
0762       /* :: IRRoundingMode(I32) x F64 x F64 -> F64 */ 
0763       Iop_AtanF64,       /* FPATAN,  arctan(arg1/arg2)       */
0764       Iop_Yl2xF64,       /* FYL2X,   arg1 * log2(arg2)       */
0765       Iop_Yl2xp1F64,     /* FYL2XP1, arg1 * log2(arg2+1.0)   */
0766       Iop_PRemF64,       /* FPREM,   non-IEEE remainder(arg1/arg2)    */
0767       Iop_PRemC3210F64,  /* C3210 flags resulting from FPREM, :: I32 */
0768       Iop_PRem1F64,      /* FPREM1,  IEEE remainder(arg1/arg2)    */
0769       Iop_PRem1C3210F64, /* C3210 flags resulting from FPREM1, :: I32 */
0770       Iop_ScaleF64,      /* FSCALE,  arg1 * (2^RoundTowardsZero(arg2)) */
0771       /* Note that on x86 guest, PRem1{C3210} has the same behaviour
0772          as the IEEE mandated RemF64, except it is limited in the
0773          range of its operand.  Hence the partialness. */
0774 
0775       /* Unary ops, with rounding. */
0776       /* :: IRRoundingMode(I32) x F64 -> F64 */
0777       Iop_SinF64,    /* FSIN */
0778       Iop_CosF64,    /* FCOS */
0779       Iop_TanF64,    /* FTAN */
0780       Iop_2xm1F64,   /* (2^arg - 1.0) */
0781       Iop_RoundF128toInt, /* F128 value to nearest integral value (still
0782                              as F128) */
0783       Iop_RoundF64toInt, /* F64 value to nearest integral value (still
0784                             as F64) */
0785       Iop_RoundF64toIntA0, /* As Iop_RoundF64toInt but ties to above zero*/
0786 
0787       Iop_RoundF64toIntE, /* As Iop_RoundF64toInt but ties to even */
0788       Iop_RoundF32toInt, /* F32 value to nearest integral value (still
0789                             as F32) */
0790       Iop_RoundF32toIntA0, /* As Iop_RoundF32toInt but ties to above zero*/
0791       Iop_RoundF32toIntE, /* As Iop_RoundF32toInt but ties to even */
0792 
0793       /* --- guest s390 specifics, not mandated by 754. --- */
0794 
0795       /* Fused multiply-add/sub */
0796       /* :: IRRoundingMode(I32) x F32 x F32 x F32 -> F32
0797             (computes arg2 * arg3 +/- arg4) */ 
0798       Iop_MAddF32, Iop_MSubF32,
0799 
0800       /* --- guest ppc32/64 specifics, not mandated by 754. --- */
0801 
0802       /* Ternary operations, with rounding. */
0803       /* Fused multiply-add/sub, with 112-bit intermediate
0804          precision for ppc.
0805          Also used to implement fused multiply-add/sub for s390. */
0806       /* :: IRRoundingMode(I32) x F64 x F64 x F64 -> F64 
0807             (computes arg2 * arg3 +/- arg4) */ 
0808       Iop_MAddF64, Iop_MSubF64,
0809 
0810       /* Variants of the above which produce a 64-bit result but which
0811          round their result to a IEEE float range first. */
0812       /* :: IRRoundingMode(I32) x F64 x F64 x F64 -> F64 */ 
0813       Iop_MAddF64r32, Iop_MSubF64r32,
0814 
0815       /* :: F64 -> F64 */
0816       Iop_RSqrtEst5GoodF64, /* reciprocal square root estimate, 5 good bits */
0817       Iop_RoundF64toF64_NEAREST, /* frin */
0818       Iop_RoundF64toF64_NegINF,  /* frim */ 
0819       Iop_RoundF64toF64_PosINF,  /* frip */
0820       Iop_RoundF64toF64_ZERO,    /* friz */
0821 
0822       /* :: F64 -> F32 */
0823       Iop_TruncF64asF32, /* do F64->F32 truncation as per 'fsts' */
0824 
0825       /* :: IRRoundingMode(I32) x F64 -> F64 */
0826       Iop_RoundF64toF32, /* round F64 to nearest F32 value (still as F64) */
0827       /* NB: pretty much the same as Iop_F64toF32, except no change 
0828          of type. */
0829 
0830       /* --- guest arm64 specifics, not mandated by 754. --- */
0831 
0832       Iop_RecpExpF64,  /* FRECPX d  :: IRRoundingMode(I32) x F64 -> F64 */
0833       Iop_RecpExpF32,  /* FRECPX s  :: IRRoundingMode(I32) x F32 -> F32 */
0834 
0835       /* --------- Possibly required by IEEE 754-2008. --------- */
0836 
0837       Iop_MaxNumF64,  /* max, F64, numerical operand if other is a qNaN */
0838       Iop_MinNumF64,  /* min, F64, ditto */
0839       Iop_MaxNumF32,  /* max, F32, ditto */
0840       Iop_MinNumF32,  /* min, F32, ditto */
0841 
0842       /* ------------------ 16-bit scalar FP ------------------ */
0843 
0844       Iop_F16toF64,  /*                       F16 -> F64 */
0845       Iop_F64toF16,  /* IRRoundingMode(I32) x F64 -> F16 */
0846 
0847       Iop_F16toF32,  /*                       F16 -> F32 */
0848       Iop_F32toF16,  /* IRRoundingMode(I32) x F32 -> F16 */
0849 
0850       /* ------------------ 32-bit SIMD Integer ------------------ */
0851 
0852       /* 32x1 saturating add/sub (ok, well, not really SIMD :) */
0853       Iop_QAdd32S,
0854       Iop_QSub32S,
0855 
0856       /* 16x2 add/sub, also signed/unsigned saturating variants */
0857       Iop_Add16x2, Iop_Sub16x2,
0858       Iop_QAdd16Sx2, Iop_QAdd16Ux2,
0859       Iop_QSub16Sx2, Iop_QSub16Ux2,
0860 
0861       /* 16x2 signed/unsigned halving add/sub.  For each lane, these
0862          compute bits 16:1 of (eg) sx(argL) + sx(argR),
0863          or zx(argL) - zx(argR) etc. */
0864       Iop_HAdd16Ux2, Iop_HAdd16Sx2,
0865       Iop_HSub16Ux2, Iop_HSub16Sx2,
0866 
0867       /* 8x4 add/sub, also signed/unsigned saturating variants */
0868       Iop_Add8x4, Iop_Sub8x4,
0869       Iop_QAdd8Sx4, Iop_QAdd8Ux4,
0870       Iop_QSub8Sx4, Iop_QSub8Ux4,
0871 
0872       /* 8x4 signed/unsigned halving add/sub.  For each lane, these
0873          compute bits 8:1 of (eg) sx(argL) + sx(argR),
0874          or zx(argL) - zx(argR) etc. */
0875       Iop_HAdd8Ux4, Iop_HAdd8Sx4,
0876       Iop_HSub8Ux4, Iop_HSub8Sx4,
0877 
0878       /* 8x4 sum of absolute unsigned differences. */
0879       Iop_Sad8Ux4,
0880 
0881       /* MISC (vector integer cmp != 0) */
0882       Iop_CmpNEZ16x2, Iop_CmpNEZ8x4,
0883 
0884       /* Byte swap in a 32-bit word */
0885       Iop_Reverse8sIn32_x1,
0886 
0887       /* ------------------ 64-bit SIMD FP ------------------------ */
0888 
0889       /* Conversion to/from int */
0890       // Deprecated: these don't specify a rounding mode
0891       Iop_I32UtoF32x2_DEP,  Iop_I32StoF32x2_DEP,    /* I32x2 -> F32x2 */
0892 
0893       Iop_F32toI32Ux2_RZ,  Iop_F32toI32Sx2_RZ,    /* F32x2 -> I32x2 */
0894 
0895       /* Fixed32 format is floating-point number with fixed number of fraction
0896          bits. The number of fraction bits is passed as a second argument of
0897          type I8. */
0898       Iop_F32ToFixed32Ux2_RZ, Iop_F32ToFixed32Sx2_RZ, /* fp -> fixed-point */
0899       Iop_Fixed32UToF32x2_RN, Iop_Fixed32SToF32x2_RN, /* fixed-point -> fp */
0900 
0901       /* Binary operations */
0902       Iop_Max32Fx2,      Iop_Min32Fx2,
0903       /* Pairwise Min and Max. See integer pairwise operations for more
0904          details. */
0905       Iop_PwMax32Fx2,    Iop_PwMin32Fx2,
0906       /* Note: For the following compares, the arm front-end assumes a
0907          nan in a lane of either argument returns zero for that lane. */
0908       Iop_CmpEQ32Fx2, Iop_CmpGT32Fx2, Iop_CmpGE32Fx2,
0909 
0910       /* Vector Reciprocal Estimate finds an approximate reciprocal of each
0911       element in the operand vector, and places the results in the destination
0912       vector.  */
0913       Iop_RecipEst32Fx2,
0914 
0915       /* Vector Reciprocal Step computes (2.0 - arg1 * arg2).
0916          Note that if one of the arguments is zero and the other one is infinity
0917          of arbitrary sign, the result of the operation is 2.0. */
0918       Iop_RecipStep32Fx2,
0919 
0920       /* Vector Reciprocal Square Root Estimate finds an approximate reciprocal
0921          square root of each element in the operand vector. */
0922       Iop_RSqrtEst32Fx2,
0923 
0924       /* Vector Reciprocal Square Root Step computes (3.0 - arg1 * arg2) / 2.0.
0925          Note that if one of the arguments is zero and the other one is infinity
0926          of arbitrary sign, the result of the operation is 1.5. */
0927       Iop_RSqrtStep32Fx2,
0928 
0929       /* Unary */
0930       Iop_Neg32Fx2, Iop_Abs32Fx2,
0931 
0932       /* ------------------ 64-bit SIMD Integer. ------------------ */
0933 
0934       /* MISC (vector integer cmp != 0) */
0935       Iop_CmpNEZ8x8, Iop_CmpNEZ16x4, Iop_CmpNEZ32x2,
0936 
0937       /* ADDITION (normal / unsigned sat / signed sat) */
0938       Iop_Add8x8,   Iop_Add16x4,   Iop_Add32x2,
0939       Iop_QAdd8Ux8, Iop_QAdd16Ux4, Iop_QAdd32Ux2, Iop_QAdd64Ux1,
0940       Iop_QAdd8Sx8, Iop_QAdd16Sx4, Iop_QAdd32Sx2, Iop_QAdd64Sx1,
0941 
0942       /* PAIRWISE operations */
0943       /* Iop_PwFoo16x4( [a,b,c,d], [e,f,g,h] ) =
0944             [Foo16(a,b), Foo16(c,d), Foo16(e,f), Foo16(g,h)] */
0945       Iop_PwAdd8x8,  Iop_PwAdd16x4,  Iop_PwAdd32x2,
0946       Iop_PwMax8Sx8, Iop_PwMax16Sx4, Iop_PwMax32Sx2,
0947       Iop_PwMax8Ux8, Iop_PwMax16Ux4, Iop_PwMax32Ux2,
0948       Iop_PwMin8Sx8, Iop_PwMin16Sx4, Iop_PwMin32Sx2,
0949       Iop_PwMin8Ux8, Iop_PwMin16Ux4, Iop_PwMin32Ux2,
0950       /* The longening variant is unary. The resulting vector contains half
0951          as many elements as the operand, but they are twice as wide.
0952          Example:
0953             Iop_PAddL16Ux4( [a,b,c,d] ) = [a+b,c+d]
0954                where a+b and c+d are unsigned 32-bit values. */
0955       Iop_PwAddL8Ux8, Iop_PwAddL16Ux4, Iop_PwAddL32Ux2,
0956       Iop_PwAddL8Sx8, Iop_PwAddL16Sx4, Iop_PwAddL32Sx2,
0957 
0958       /* SUBTRACTION (normal / unsigned sat / signed sat) */
0959       Iop_Sub8x8,   Iop_Sub16x4,   Iop_Sub32x2,
0960       Iop_QSub8Ux8, Iop_QSub16Ux4, Iop_QSub32Ux2, Iop_QSub64Ux1,
0961       Iop_QSub8Sx8, Iop_QSub16Sx4, Iop_QSub32Sx2, Iop_QSub64Sx1,
0962 
0963       /* ABSOLUTE VALUE */
0964       Iop_Abs8x8, Iop_Abs16x4, Iop_Abs32x2,
0965 
0966       /* MULTIPLICATION (normal / high half of signed/unsigned / polynomial ) */
0967       Iop_Mul8x8, Iop_Mul16x4, Iop_Mul32x2,
0968       Iop_Mul32Fx2,
0969       Iop_MulHi16Ux4,
0970       Iop_MulHi16Sx4,
0971       /* Polynomial multiplication treats its arguments as coefficients of
0972          polynomials over {0, 1}. */
0973       Iop_PolynomialMul8x8,
0974 
0975       /* Vector Saturating Doubling Multiply Returning High Half and
0976          Vector Saturating Rounding Doubling Multiply Returning High Half */
0977       /* These IROp's multiply corresponding elements in two vectors, double
0978          the results, and place the most significant half of the final results
0979          in the destination vector. The results are truncated or rounded. If
0980          any of the results overflow, they are saturated. */
0981       Iop_QDMulHi16Sx4, Iop_QDMulHi32Sx2,
0982       Iop_QRDMulHi16Sx4, Iop_QRDMulHi32Sx2,
0983 
0984       /* AVERAGING: note: (arg1 + arg2 + 1) >>u 1 */
0985       Iop_Avg8Ux8,
0986       Iop_Avg16Ux4,
0987 
0988       /* MIN/MAX */
0989       Iop_Max8Sx8, Iop_Max16Sx4, Iop_Max32Sx2,
0990       Iop_Max8Ux8, Iop_Max16Ux4, Iop_Max32Ux2,
0991       Iop_Min8Sx8, Iop_Min16Sx4, Iop_Min32Sx2,
0992       Iop_Min8Ux8, Iop_Min16Ux4, Iop_Min32Ux2,
0993 
0994       /* COMPARISON */
0995       Iop_CmpEQ8x8,  Iop_CmpEQ16x4,  Iop_CmpEQ32x2,
0996       Iop_CmpGT8Ux8, Iop_CmpGT16Ux4, Iop_CmpGT32Ux2,
0997       Iop_CmpGT8Sx8, Iop_CmpGT16Sx4, Iop_CmpGT32Sx2,
0998 
0999       /* COUNT ones / leading zeroes / leading sign bits (not including topmost
1000          bit) */
1001       Iop_Cnt8x8,
1002       Iop_Clz8x8, Iop_Clz16x4, Iop_Clz32x2,
1003       Iop_Cls8x8, Iop_Cls16x4, Iop_Cls32x2,
1004       Iop_Clz64x2,
1005 
1006       /*Vector COUNT trailing zeros */
1007       Iop_Ctz8x16, Iop_Ctz16x8, Iop_Ctz32x4, Iop_Ctz64x2, 
1008 
1009       /* VECTOR x VECTOR SHIFT / ROTATE */
1010       Iop_Shl8x8, Iop_Shl16x4, Iop_Shl32x2,
1011       Iop_Shr8x8, Iop_Shr16x4, Iop_Shr32x2,
1012       Iop_Sar8x8, Iop_Sar16x4, Iop_Sar32x2,
1013       Iop_Sal8x8, Iop_Sal16x4, Iop_Sal32x2, Iop_Sal64x1,
1014 
1015       /* VECTOR x SCALAR SHIFT (shift amt :: Ity_I8) */
1016       Iop_ShlN8x8, Iop_ShlN16x4, Iop_ShlN32x2,
1017       Iop_ShrN8x8, Iop_ShrN16x4, Iop_ShrN32x2,
1018       Iop_SarN8x8, Iop_SarN16x4, Iop_SarN32x2,
1019 
1020       /* VECTOR x VECTOR SATURATING SHIFT */
1021       Iop_QShl8x8, Iop_QShl16x4, Iop_QShl32x2, Iop_QShl64x1,
1022       Iop_QSal8x8, Iop_QSal16x4, Iop_QSal32x2, Iop_QSal64x1,
1023       /* VECTOR x INTEGER SATURATING SHIFT */
1024       Iop_QShlNsatSU8x8,  Iop_QShlNsatSU16x4,
1025       Iop_QShlNsatSU32x2, Iop_QShlNsatSU64x1,
1026       Iop_QShlNsatUU8x8,  Iop_QShlNsatUU16x4,
1027       Iop_QShlNsatUU32x2, Iop_QShlNsatUU64x1,
1028       Iop_QShlNsatSS8x8,  Iop_QShlNsatSS16x4,
1029       Iop_QShlNsatSS32x2, Iop_QShlNsatSS64x1,
1030 
1031       /* NARROWING (binary) 
1032          -- narrow 2xI64 into 1xI64, hi half from left arg */
1033       /* For saturated narrowing, I believe there are 4 variants of
1034          the basic arithmetic operation, depending on the signedness
1035          of argument and result.  Here are examples that exemplify
1036          what I mean:
1037 
1038          QNarrow16Uto8U ( UShort x )  if (x >u 255) x = 255;
1039                                       return x[7:0];
1040 
1041          QNarrow16Sto8S ( Short x )   if (x <s -128) x = -128;
1042                                       if (x >s  127) x = 127;
1043                                       return x[7:0];
1044 
1045          QNarrow16Uto8S ( UShort x )  if (x >u 127) x = 127;
1046                                       return x[7:0];
1047 
1048          QNarrow16Sto8U ( Short x )   if (x <s 0)   x = 0;
1049                                       if (x >s 255) x = 255;
1050                                       return x[7:0];
1051       */
1052       Iop_QNarrowBin16Sto8Ux8,
1053       Iop_QNarrowBin16Sto8Sx8, Iop_QNarrowBin32Sto16Sx4,
1054       Iop_NarrowBin16to8x8,    Iop_NarrowBin32to16x4,
1055 
1056       /* INTERLEAVING */
1057       /* Interleave lanes from low or high halves of
1058          operands.  Most-significant result lane is from the left
1059          arg. */
1060       Iop_InterleaveHI8x8, Iop_InterleaveHI16x4, Iop_InterleaveHI32x2,
1061       Iop_InterleaveLO8x8, Iop_InterleaveLO16x4, Iop_InterleaveLO32x2,
1062       /* Interleave odd/even lanes of operands.  Most-significant result lane
1063          is from the left arg.  Note that Interleave{Odd,Even}Lanes32x2 are
1064          identical to Interleave{HI,LO}32x2 and so are omitted.*/
1065       Iop_InterleaveOddLanes8x8, Iop_InterleaveEvenLanes8x8,
1066       Iop_InterleaveOddLanes16x4, Iop_InterleaveEvenLanes16x4,
1067 
1068       /* CONCATENATION -- build a new value by concatenating either
1069          the even or odd lanes of both operands.  Note that
1070          Cat{Odd,Even}Lanes32x2 are identical to Interleave{HI,LO}32x2
1071          and so are omitted. */
1072       Iop_CatOddLanes8x8, Iop_CatOddLanes16x4,
1073       Iop_CatEvenLanes8x8, Iop_CatEvenLanes16x4,
1074 
1075       /* GET / SET elements of VECTOR
1076          GET is binop (I64, I8) -> I<elem_size>
1077          SET is triop (I64, I8, I<elem_size>) -> I64 */
1078       /* Note: the arm back-end handles only constant second argument */
1079       Iop_GetElem8x8, Iop_GetElem16x4, Iop_GetElem32x2,
1080       Iop_SetElem8x8, Iop_SetElem16x4, Iop_SetElem32x2,
1081 
1082       /* DUPLICATING -- copy value to all lanes */
1083       Iop_Dup8x8,   Iop_Dup16x4,   Iop_Dup32x2,
1084 
1085       /* SLICE -- produces the lowest 64 bits of (arg1:arg2) >> (8 * arg3).
1086          arg3 is a shift amount in bytes and may be between 0 and 8
1087          inclusive.  When 0, the result is arg2; when 8, the result is arg1.
1088          Not all back ends handle all values.  The arm32 and arm64 back
1089          ends handle only immediate arg3 values. */
1090       Iop_Slice64,  // (I64, I64, I8) -> I64
1091 
1092       /* REVERSE the order of chunks in vector lanes.  Chunks must be
1093          smaller than the vector lanes (obviously) and so may be 8-, 16- and
1094          32-bit in size.  Note that the degenerate case,
1095          Iop_Reverse8sIn64_x1, is simply a vanilla byte-swap. */
1096       /* Examples:
1097             Reverse8sIn16_x4([a,b,c,d,e,f,g,h]) = [b,a,d,c,f,e,h,g]
1098             Reverse8sIn32_x2([a,b,c,d,e,f,g,h]) = [d,c,b,a,h,g,f,e]
1099             Reverse8sIn64_x1([a,b,c,d,e,f,g,h]) = [h,g,f,e,d,c,b,a] */
1100       Iop_Reverse8sIn16_x4,
1101       Iop_Reverse8sIn32_x2, Iop_Reverse16sIn32_x2,
1102       Iop_Reverse8sIn64_x1, Iop_Reverse16sIn64_x1, Iop_Reverse32sIn64_x1,
1103 
1104       /* PERMUTING -- copy src bytes to dst,
1105          as indexed by control vector bytes:
1106             for i in 0 .. 7 . result[i] = argL[ argR[i] ] 
1107          argR[i] values may only be in the range 0 .. 7, else behaviour
1108          is undefined.  That is, argR[i][7:3] must be zero. */
1109       Iop_Perm8x8,
1110 
1111       /* PERMUTING with optional zeroing:
1112             for i in 0 .. 7 . result[i] = if argR[i] bit 7 is set
1113                                           then zero else argL[ argR[i] ]
1114          argR[i][6:3] must be zero, else behaviour is undefined.
1115       */
1116       Iop_PermOrZero8x8,
1117 
1118       /* MISC CONVERSION -- get high bits of each byte lane, a la
1119          x86/amd64 pmovmskb */
1120       Iop_GetMSBs8x8, /* I64 -> I8 */
1121 
1122       /* Vector Reciprocal Estimate and Vector Reciprocal Square Root Estimate
1123          See floating-point equivalents for details. */
1124       Iop_RecipEst32Ux2, Iop_RSqrtEst32Ux2,
1125 
1126       /* ------------------ Decimal Floating Point ------------------ */
1127 
1128       /* ARITHMETIC INSTRUCTIONS   64-bit
1129      ----------------------------------
1130      IRRoundingMode(I32) X D64 X D64 -> D64
1131       */
1132       Iop_AddD64, Iop_SubD64, Iop_MulD64, Iop_DivD64,
1133 
1134       /* ARITHMETIC INSTRUCTIONS  128-bit
1135      ----------------------------------
1136      IRRoundingMode(I32) X D128 X D128 -> D128
1137       */
1138       Iop_AddD128, Iop_SubD128, Iop_MulD128, Iop_DivD128,
1139 
1140       /* SHIFT SIGNIFICAND INSTRUCTIONS
1141        *    The DFP significand is shifted by the number of digits specified
1142        *    by the U8 operand.  Digits shifted out of the leftmost digit are
1143        *    lost. Zeros are supplied to the vacated positions on the right.
1144        *    The sign of the result is the same as the sign of the original
1145        *    operand.
1146        *
1147        * D64 x U8  -> D64    left shift and right shift respectively */
1148       Iop_ShlD64, Iop_ShrD64,
1149 
1150       /* D128 x U8  -> D128  left shift and right shift respectively */
1151       Iop_ShlD128, Iop_ShrD128,
1152 
1153 
1154       /* FORMAT CONVERSION INSTRUCTIONS
1155        *   D32 -> D64
1156        */
1157       Iop_D32toD64,
1158 
1159       /*   D64 -> D128 */
1160       Iop_D64toD128, 
1161 
1162       /*   I32S -> D128 */
1163       Iop_I32StoD128,
1164 
1165       /*   I32U -> D128 */
1166       Iop_I32UtoD128,
1167 
1168       /*   I64S -> D128 */
1169       Iop_I64StoD128, 
1170 
1171       /*   I64U -> D128 */
1172       Iop_I64UtoD128,
1173 
1174       /*   IRRoundingMode(I32) x I128S -> D128 */
1175       Iop_I128StoD128,
1176 
1177       /*   IRRoundingMode(I32) x D64 -> D32 */
1178       Iop_D64toD32,
1179 
1180       /*   IRRoundingMode(I32) x D128 -> D64 */
1181       Iop_D128toD64,
1182 
1183       /*   I32S -> D64 */
1184       Iop_I32StoD64,
1185 
1186       /*   I32U -> D64 */
1187       Iop_I32UtoD64,
1188 
1189       /*   IRRoundingMode(I32) x I64 -> D64 */
1190       Iop_I64StoD64,
1191 
1192       /*   IRRoundingMode(I32) x I64 -> D64 */
1193       Iop_I64UtoD64,
1194 
1195       /*   IRRoundingMode(I32) x D64 -> I32 */
1196       Iop_D64toI32S,
1197 
1198       /*   IRRoundingMode(I32) x D64 -> I32 */
1199       Iop_D64toI32U,
1200 
1201       /*   IRRoundingMode(I32) x D64 -> I64 */
1202       Iop_D64toI64S,
1203 
1204       /*   IRRoundingMode(I32) x D64 -> I64 */
1205       Iop_D64toI64U,
1206 
1207       /*   IRRoundingMode(I32) x D128 -> I32 */
1208       Iop_D128toI32S,
1209 
1210       /*   IRRoundingMode(I32) x D128 -> I32 */
1211       Iop_D128toI32U,
1212 
1213       /*   IRRoundingMode(I32) x D128 -> I64 */
1214       Iop_D128toI64S,
1215 
1216       /*   IRRoundingMode(I32) x D128 -> I64 */
1217       Iop_D128toI64U,
1218 
1219       /*   IRRoundingMode(I32) x D128 -> I128 */
1220       Iop_D128toI128S,
1221 
1222       /*   IRRoundingMode(I32) x F32 -> D32 */
1223       Iop_F32toD32,
1224 
1225       /*   IRRoundingMode(I32) x F32 -> D64 */
1226       Iop_F32toD64,
1227 
1228       /*   IRRoundingMode(I32) x F32 -> D128 */
1229       Iop_F32toD128,
1230 
1231       /*   IRRoundingMode(I32) x F64 -> D32 */
1232       Iop_F64toD32,
1233 
1234       /*   IRRoundingMode(I32) x F64 -> D64 */
1235       Iop_F64toD64,
1236 
1237       /*   IRRoundingMode(I32) x F64 -> D128 */
1238       Iop_F64toD128,
1239 
1240       /*   IRRoundingMode(I32) x F128 -> D32 */
1241       Iop_F128toD32,
1242 
1243       /*   IRRoundingMode(I32) x F128 -> D64 */
1244       Iop_F128toD64,
1245 
1246       /*   IRRoundingMode(I32) x F128 -> D128 */
1247       Iop_F128toD128,
1248 
1249       /*   IRRoundingMode(I32) x D32 -> F32 */
1250       Iop_D32toF32,
1251 
1252       /*   IRRoundingMode(I32) x D32 -> F64 */
1253       Iop_D32toF64,
1254 
1255       /*   IRRoundingMode(I32) x D32 -> F128 */
1256       Iop_D32toF128,
1257 
1258       /*   IRRoundingMode(I32) x D64 -> F32 */
1259       Iop_D64toF32,
1260 
1261       /*   IRRoundingMode(I32) x D64 -> F64 */
1262       Iop_D64toF64,
1263 
1264       /*   IRRoundingMode(I32) x D64 -> F128 */
1265       Iop_D64toF128,
1266 
1267       /*   IRRoundingMode(I32) x D128 -> F32 */
1268       Iop_D128toF32,
1269 
1270       /*   IRRoundingMode(I32) x D128 -> F64 */
1271       Iop_D128toF64,
1272 
1273       /*   IRRoundingMode(I32) x D128 -> F128 */
1274       Iop_D128toF128,
1275 
1276       /* ROUNDING INSTRUCTIONS
1277        * IRRoundingMode(I32) x D64 -> D64
1278        * The D64 operand, if a finite number, is rounded to a
1279        * floating point integer value, i.e. no fractional part.
1280        */
1281       Iop_RoundD64toInt,
1282 
1283       /* IRRoundingMode(I32) x D128 -> D128 */
1284       Iop_RoundD128toInt,
1285 
1286       /* COMPARE INSTRUCTIONS
1287        * D64 x D64 -> IRCmpD64Result(I32) */
1288       Iop_CmpD64,
1289 
1290       /* D128 x D128 -> IRCmpD128Result(I32) */
1291       Iop_CmpD128,
1292 
1293       /* COMPARE BIASED EXPONENT INSTRUCTIONS
1294        * D64 x D64 -> IRCmpD64Result(I32) */
1295       Iop_CmpExpD64,
1296 
1297       /* D128 x D128 -> IRCmpD128Result(I32) */
1298       Iop_CmpExpD128,
1299 
1300       /* QUANTIZE AND ROUND INSTRUCTIONS
1301        * The source operand is converted and rounded to the form with the 
1302        * immediate exponent specified by the rounding and exponent parameter.
1303        *
1304        * The second operand is converted and rounded to the form
1305        * of the first operand's exponent and then rounded based on the specified
1306        * rounding mode parameter.
1307        *
1308        * IRRoundingMode(I32) x D64 x D64-> D64 */
1309       Iop_QuantizeD64,
1310 
1311       /* IRRoundingMode(I32) x D128 x D128 -> D128 */
1312       Iop_QuantizeD128,
1313 
1314       /* IRRoundingMode(I32) x I8 x D64 -> D64
1315        *    The Decimal Floating point operand is rounded to the requested 
1316        *    significance given by the I8 operand as specified by the rounding 
1317        *    mode.
1318        */
1319       Iop_SignificanceRoundD64,
1320 
1321       /* IRRoundingMode(I32) x I8 x D128 -> D128 */
1322       Iop_SignificanceRoundD128,
1323 
1324       /* EXTRACT AND INSERT INSTRUCTIONS
1325        * D64 -> I64
1326        *    The exponent of the D32 or D64 operand is extracted.  The 
1327        *    extracted exponent is converted to a 64-bit signed binary integer.
1328        */
1329       Iop_ExtractExpD64,
1330 
1331       /* D128 -> I64 */
1332       Iop_ExtractExpD128,
1333 
1334       /* D64 -> I64
1335        * The number of significand digits of the D64 operand is extracted.
1336        * The number is stored as a 64-bit signed binary integer.
1337        */
1338       Iop_ExtractSigD64,
1339 
1340       /* D128 -> I64 */
1341       Iop_ExtractSigD128,
1342 
1343       /* I64 x D64  -> D64
1344        *    The exponent is specified by the first I64 operand the signed
1345        *    significand is given by the second I64 value.  The result is a D64
1346        *    value consisting of the specified significand and exponent whose 
1347        *    sign is that of the specified significand.
1348        */
1349       Iop_InsertExpD64,
1350 
1351       /* I64 x D128 -> D128 */
1352       Iop_InsertExpD128,
1353 
1354       /* Support for 128-bit DFP type */
1355       Iop_D64HLtoD128, Iop_D128HItoD64, Iop_D128LOtoD64,
1356 
1357       /*  I64 -> I64  
1358        *     Convert 50-bit densely packed BCD string to 60 bit BCD string
1359        */
1360       Iop_DPBtoBCD,
1361 
1362       /* I64 -> I64
1363        *     Convert 60 bit BCD string to 50-bit densely packed BCD string
1364        */
1365       Iop_BCDtoDPB,
1366 
1367       /* BCD arithmetic instructions, (V128, V128) -> V128
1368        * The BCD format is the same as that used in the BCD<->DPB conversion
1369        * routines, except using 124 digits (vs 60) plus the trailing 4-bit
1370        * signed code. */
1371       Iop_BCDAdd, Iop_BCDSub,
1372 
1373       /* Conversion signed 128-bit integer to signed BCD 128-bit */
1374       Iop_I128StoBCD128,
1375 
1376       /* Conversion signed BCD 128-bit to 128-bit integer */
1377       Iop_BCD128toI128S,
1378 
1379       /* Conversion I64 -> D64 */
1380       Iop_ReinterpI64asD64,
1381 
1382       /* Conversion D64 -> I64 */
1383       Iop_ReinterpD64asI64,
1384 
1385       /* ------------------ 128-bit SIMD FP. ------------------ */
1386 
1387       /* --- 16x8 vector FP --- */
1388 
1389       /* binary :: IRRoundingMode(I32) x V128 -> V128 */
1390       Iop_Sqrt16Fx8,
1391 
1392       /* ternary :: IRRoundingMode(I32) x V128 x V128 -> V128 */
1393       Iop_Add16Fx8, Iop_Sub16Fx8,
1394 
1395       /* binary */
1396       Iop_CmpLT16Fx8, Iop_CmpLE16Fx8, Iop_CmpEQ16Fx8,
1397 
1398       /* unary */
1399       Iop_Abs16Fx8,
1400       Iop_Neg16Fx8,
1401 
1402       /* --- 32x4 vector FP --- */
1403 
1404       /* ternary :: IRRoundingMode(I32) x V128 x V128 -> V128 */
1405       Iop_Add32Fx4, Iop_Sub32Fx4, Iop_Mul32Fx4, Iop_Div32Fx4, 
1406 
1407       /* binary */
1408       Iop_Max32Fx4, Iop_Min32Fx4,
1409       Iop_Add32Fx2, Iop_Sub32Fx2,
1410       /* Note: For the following compares, the ppc and arm front-ends assume a
1411          nan in a lane of either argument returns zero for that lane. */
1412       Iop_CmpEQ32Fx4, Iop_CmpLT32Fx4, Iop_CmpLE32Fx4, Iop_CmpUN32Fx4,
1413       Iop_CmpGT32Fx4, Iop_CmpGE32Fx4,
1414 
1415       /* Pairwise Max and Min. See integer pairwise operations for details. */
1416       Iop_PwMax32Fx4, Iop_PwMin32Fx4,
1417 
1418       /* unary */
1419       Iop_Abs32Fx4,
1420       Iop_Neg32Fx4,
1421 
1422       /* binary :: IRRoundingMode(I32) x V128 -> V128 */
1423       Iop_Sqrt32Fx4,
1424 
1425       /* Vector Reciprocal Estimate finds an approximate reciprocal of each
1426          element in the operand vector, and places the results in the
1427          destination vector.  */
1428       Iop_RecipEst32Fx4,
1429 
1430       /* Vector Reciprocal Step computes (2.0 - arg1 * arg2).
1431          Note that if one of the arguments is zero and the other one is infinity
1432          of arbitrary sign, the result of the operation is 2.0. */
1433       Iop_RecipStep32Fx4,
1434 
1435       /* Vector Reciprocal Square Root Estimate finds an approximate reciprocal
1436          square root of each element in the operand vector. */
1437       Iop_RSqrtEst32Fx4,
1438 
1439       /* Scaling of vector with a power of 2  (wd[i] <- ws[i] * 2^wt[i]) */
1440       Iop_Scale2_32Fx4,
1441 
1442       /* Vector floating-point base 2 logarithm */
1443       Iop_Log2_32Fx4,
1444 
1445       /* Vector floating-point exponential 2^x */
1446       Iop_Exp2_32Fx4,
1447 
1448       /* Vector Reciprocal Square Root Step computes (3.0 - arg1 * arg2) / 2.0.
1449          Note that if one of the arguments is zero and the other one is infinity
1450          of arbitrary sign, the result of the operation is 1.5. */
1451       Iop_RSqrtStep32Fx4,
1452 
1453       /* --- Int to/from FP conversion --- */
1454       /* Unlike the standard fp conversions, these irops take no
1455          rounding mode argument. Instead the irop trailers _R{M,P,N,Z}
1456          indicate the mode: {-inf, +inf, nearest, zero} respectively. */
1457 
1458       // These carry no rounding mode and are therefore deprecated
1459       Iop_I32UtoF32x4_DEP, Iop_I32StoF32x4_DEP,  /* I32x4 -> F32x4 */
1460 
1461       Iop_I32StoF32x4, /* IRRoundingMode(I32) x V128 -> V128 */
1462       Iop_F32toI32Sx4, /* IRRoundingMode(I32) x V128 -> V128 */
1463 
1464       Iop_F32toI32Ux4_RZ,  Iop_F32toI32Sx4_RZ,  /* F32x4 -> I32x4       */
1465       Iop_QF32toI32Ux4_RZ, Iop_QF32toI32Sx4_RZ, /* F32x4 -> I32x4 (saturating) */
1466       Iop_RoundF32x4_RM, Iop_RoundF32x4_RP,   /* round to fp integer  */
1467       Iop_RoundF32x4_RN, Iop_RoundF32x4_RZ,   /* round to fp integer  */
1468       /* Fixed32 format is floating-point number with fixed number of fraction
1469          bits. The number of fraction bits is passed as a second argument of
1470          type I8. */
1471       Iop_F32ToFixed32Ux4_RZ, Iop_F32ToFixed32Sx4_RZ, /* fp -> fixed-point */
1472       Iop_Fixed32UToF32x4_RN, Iop_Fixed32SToF32x4_RN, /* fixed-point -> fp */
1473 
1474       /* --- Single to/from half conversion --- */
1475       /* FIXME: what kind of rounding in F32x4 -> F16x4 case? */
1476       // FIXME these carry no rounding mode
1477       Iop_F32toF16x4_DEP, /* F32x4(==V128) -> F16x4(==I64), NO ROUNDING MODE */
1478       Iop_F32toF16x4,     /* IRRoundingMode(I32) x V128 -> I64 */
1479       Iop_F16toF32x4,     /* F16x4 -> F32x4 */
1480 
1481       /* -- Double to/from half conversion -- */
1482       Iop_F64toF16x2_DEP, // F64x2 -> F16x2, NO ROUNDING MODE
1483       Iop_F16toF64x2,
1484 
1485       /* Values from two registers converted in smaller type and put in one
1486        IRRoundingMode(I32) x (F32x4 | F32x4) -> Q16x8 */
1487       Iop_F32x4_2toQ16x8,
1488 
1489 
1490       /* --- 32x4 lowest-lane-only scalar FP --- */
1491 
1492       /* In binary cases, upper 3/4 is copied from first operand.  In
1493          unary cases, upper 3/4 is copied from the operand. */
1494 
1495       /* binary */
1496       Iop_Add32F0x4, Iop_Sub32F0x4, Iop_Mul32F0x4, Iop_Div32F0x4, 
1497       Iop_Max32F0x4, Iop_Min32F0x4,
1498       Iop_CmpEQ32F0x4, Iop_CmpLT32F0x4, Iop_CmpLE32F0x4, Iop_CmpUN32F0x4, 
1499 
1500       /* unary */
1501       Iop_RecipEst32F0x4, Iop_Sqrt32F0x4, Iop_RSqrtEst32F0x4,
1502 
1503       /* --- 64x2 vector FP --- */
1504 
1505       /* ternary :: IRRoundingMode(I32) x V128 x V128 -> V128 */
1506       Iop_Add64Fx2, Iop_Sub64Fx2, Iop_Mul64Fx2, Iop_Div64Fx2, 
1507 
1508       /* binary */
1509       Iop_Max64Fx2, Iop_Min64Fx2,
1510       Iop_CmpEQ64Fx2, Iop_CmpLT64Fx2, Iop_CmpLE64Fx2, Iop_CmpUN64Fx2, 
1511 
1512       /* unary */
1513       Iop_Abs64Fx2,
1514       Iop_Neg64Fx2,
1515 
1516       /* binary :: IRRoundingMode(I32) x V128 -> V128 */
1517       Iop_Sqrt64Fx2,
1518 
1519       /* Scaling of vector with a power of 2  (wd[i] <- ws[i] * 2^wt[i]) */
1520       Iop_Scale2_64Fx2,
1521 
1522       /* Vector floating-point base 2 logarithm */
1523       Iop_Log2_64Fx2,
1524 
1525       /* see 32Fx4 variants for description */
1526       Iop_RecipEst64Fx2,    // unary
1527       Iop_RecipStep64Fx2,   // binary
1528       Iop_RSqrtEst64Fx2,    // unary
1529       Iop_RSqrtStep64Fx2,   // binary
1530 
1531 
1532       /* Values from two registers converted in smaller type and put in one
1533        IRRoundingMode(I32) x (F64x2 | F64x2) -> Q32x4 */
1534       Iop_F64x2_2toQ32x4,
1535 
1536       /* --- 64x2 lowest-lane-only scalar FP --- */
1537 
1538       /* In binary cases, upper half is copied from first operand.  In
1539          unary cases, upper half is copied from the operand. */
1540 
1541       /* binary */
1542       Iop_Add64F0x2, Iop_Sub64F0x2, Iop_Mul64F0x2, Iop_Div64F0x2, 
1543       Iop_Max64F0x2, Iop_Min64F0x2,
1544       Iop_CmpEQ64F0x2, Iop_CmpLT64F0x2, Iop_CmpLE64F0x2, Iop_CmpUN64F0x2, 
1545 
1546       /* unary */
1547       Iop_Sqrt64F0x2,
1548 
1549       /* --- pack / unpack --- */
1550 
1551       /* 64 <-> 128 bit vector */
1552       Iop_V128to64,     // :: V128 -> I64, low half
1553       Iop_V128HIto64,   // :: V128 -> I64, high half
1554       Iop_64HLtoV128,   // :: (I64,I64) -> V128
1555 
1556       Iop_64UtoV128,
1557       Iop_SetV128lo64,
1558 
1559       /* Copies lower 64/32/16/8 bits, zeroes out the rest. */
1560       Iop_ZeroHI64ofV128,    // :: V128 -> V128
1561       Iop_ZeroHI96ofV128,    // :: V128 -> V128
1562       Iop_ZeroHI112ofV128,   // :: V128 -> V128
1563       Iop_ZeroHI120ofV128,   // :: V128 -> V128
1564 
1565       /* 32 <-> 128 bit vector */
1566       Iop_32UtoV128,
1567       Iop_V128to32,     // :: V128 -> I32, lowest lane
1568       Iop_SetV128lo32,  // :: (V128,I32) -> V128
1569 
1570       /* ------------------ 128-bit SIMD Integer. ------------------ */
1571 
1572       /* BITWISE OPS */
1573       Iop_NotV128,
1574       Iop_AndV128, Iop_OrV128, Iop_XorV128, 
1575 
1576       /* VECTOR SHIFT (shift amt :: Ity_I8) */
1577       Iop_ShlV128, Iop_ShrV128, Iop_SarV128,
1578 
1579       /* MISC (vector integer cmp != 0) */
1580       Iop_CmpNEZ8x16, Iop_CmpNEZ16x8, Iop_CmpNEZ32x4, Iop_CmpNEZ64x2,
1581       Iop_CmpNEZ128x1,
1582 
1583       /* ADDITION (normal / U->U sat / S->S sat) */
1584       Iop_Add8x16,    Iop_Add16x8,    Iop_Add32x4,    Iop_Add64x2,   Iop_Add128x1,
1585       Iop_QAdd8Ux16,  Iop_QAdd16Ux8,  Iop_QAdd32Ux4,  Iop_QAdd64Ux2,
1586       Iop_QAdd8Sx16,  Iop_QAdd16Sx8,  Iop_QAdd32Sx4,  Iop_QAdd64Sx2,
1587 
1588       /* ADDITION, ARM64 specific saturating variants. */
1589       /* Unsigned widen left arg, signed widen right arg, add, saturate S->S.
1590          This corresponds to SUQADD. */
1591       Iop_QAddExtUSsatSS8x16, Iop_QAddExtUSsatSS16x8,
1592       Iop_QAddExtUSsatSS32x4, Iop_QAddExtUSsatSS64x2,
1593       /* Signed widen left arg, unsigned widen right arg, add, saturate U->U.
1594          This corresponds to USQADD. */
1595       Iop_QAddExtSUsatUU8x16, Iop_QAddExtSUsatUU16x8,
1596       Iop_QAddExtSUsatUU32x4, Iop_QAddExtSUsatUU64x2,
1597 
1598       /* SUBTRACTION (normal / unsigned sat / signed sat) */
1599       Iop_Sub8x16,   Iop_Sub16x8,   Iop_Sub32x4,   Iop_Sub64x2,   Iop_Sub128x1,
1600       Iop_QSub8Ux16, Iop_QSub16Ux8, Iop_QSub32Ux4, Iop_QSub64Ux2,
1601       Iop_QSub8Sx16, Iop_QSub16Sx8, Iop_QSub32Sx4, Iop_QSub64Sx2,
1602 
1603       /* MULTIPLICATION (normal / high half of signed/unsigned) */
1604       Iop_Mul8x16,  Iop_Mul16x8,    Iop_Mul32x4,
1605       Iop_MulHi8Ux16, Iop_MulHi16Ux8, Iop_MulHi32Ux4,
1606       Iop_MulHi8Sx16, Iop_MulHi16Sx8, Iop_MulHi32Sx4,
1607       /* (widening signed/unsigned of even lanes, with lowest lane=zero) */
1608       Iop_MullEven8Ux16, Iop_MullEven16Ux8, Iop_MullEven32Ux4,
1609       Iop_MullEven8Sx16, Iop_MullEven16Sx8, Iop_MullEven32Sx4,
1610 
1611       /* Widening multiplies, all of the form (I64, I64) -> V128 */
1612       Iop_Mull8Ux8, Iop_Mull8Sx8,
1613       Iop_Mull16Ux4, Iop_Mull16Sx4,
1614       Iop_Mull32Ux2, Iop_Mull32Sx2,
1615 
1616       /* Signed doubling saturating widening multiplies, (I64, I64) -> V128 */
1617       Iop_QDMull16Sx4, Iop_QDMull32Sx2,
1618 
1619       /* Vector Saturating Doubling Multiply Returning High Half and
1620          Vector Saturating Rounding Doubling Multiply Returning High Half.
1621          These IROps multiply corresponding elements in two vectors, double
1622          the results, and place the most significant half of the final results
1623          in the destination vector.  The results are truncated or rounded.  If
1624          any of the results overflow, they are saturated.  To be more precise,
1625          for each lane, the computed result is: 
1626            QDMulHi:  
1627              hi-half( sign-extend(laneL) *q sign-extend(laneR) *q 2 )
1628            QRDMulHi:
1629              hi-half( sign-extend(laneL) *q sign-extend(laneR) *q 2
1630                       +q (1 << (lane-width-in-bits - 1)) )
1631       */
1632       Iop_QDMulHi16Sx8,  Iop_QDMulHi32Sx4,  /* (V128, V128) -> V128 */
1633       Iop_QRDMulHi16Sx8, Iop_QRDMulHi32Sx4, /* (V128, V128) -> V128 */
1634 
1635       /* Polynomial multiplication treats its arguments as
1636          coefficients of polynomials over {0, 1}. */
1637       Iop_PolynomialMul8x16, /* (V128, V128) -> V128 */
1638       Iop_PolynomialMull8x8, /*   (I64, I64) -> V128 */
1639 
1640       /* Vector Polynomial multiplication add.   (V128, V128) -> V128
1641 
1642        *** Below is the algorithm for the instructions. These Iops could
1643            be emulated to get this functionality, but the emulation would
1644            be long and messy.
1645 
1646         Example for polynomial multiply add for vector of bytes
1647         do i = 0 to 15
1648             prod[i].bit[0:14] <- 0
1649             srcA <- VR[argL].byte[i]
1650             srcB <- VR[argR].byte[i]
1651             do j = 0 to 7
1652                 do k = 0 to j
1653                     gbit <- srcA.bit[k] & srcB.bit[j-k]
1654                     prod[i].bit[j] <- prod[i].bit[j] ^ gbit
1655                 end
1656             end
1657 
1658             do j = 8 to 14
1659                 do k = j-7 to 7
1660                      gbit <- (srcA.bit[k] & srcB.bit[j-k])
1661                      prod[i].bit[j] <- prod[i].bit[j] ^ gbit
1662                 end
1663             end
1664         end
1665 
1666         do i = 0 to 7
1667             VR[dst].hword[i] <- 0b0 || (prod[2×i] ^ prod[2×i+1])
1668         end
1669       */
1670       Iop_PolynomialMulAdd8x16, Iop_PolynomialMulAdd16x8,
1671       Iop_PolynomialMulAdd32x4, Iop_PolynomialMulAdd64x2,
1672 
1673       /* PAIRWISE operations */
1674       /* Iop_PwFoo16x4( [a,b,c,d], [e,f,g,h] ) =
1675             [Foo16(a,b), Foo16(c,d), Foo16(e,f), Foo16(g,h)] */
1676       Iop_PwAdd8x16, Iop_PwAdd16x8, Iop_PwAdd32x4,
1677       Iop_PwAdd32Fx2,
1678 
1679       /* The lengthening variant is unary.  The resulting vector contains
1680          half as many elements as the operand, but each is twice as wide.
1681          Example:
1682             Iop_PwAddL16Ux4( [a,b,c,d] ) = [a+b,c+d]
1683                where a+b and c+d are unsigned 32-bit values. */
1684       Iop_PwAddL8Ux16, Iop_PwAddL16Ux8, Iop_PwAddL32Ux4, Iop_PwAddL64Ux2,
1685       Iop_PwAddL8Sx16, Iop_PwAddL16Sx8, Iop_PwAddL32Sx4,
1686 
1687       /* This is amd64 PMADDUBSW, (V128, V128) -> V128.  For each adjacent pair
1688          of bytes [a,b] in the first arg and [c,d] in the second, computes:
1689             signed/signed sat to 16 bits ( zxTo16(a) * sxTo16(b) 
1690                                            + zxTo16(c) * sxTo16(d) )
1691          This exists because it's frequently used and there's no reasonably
1692          concise way to express it using other IROps.
1693       */
1694       Iop_PwExtUSMulQAdd8x16,
1695 
1696       /* Other unary pairwise ops */
1697 
1698       /* Vector bit matrix transpose.  (V128) -> V128 */
1699       /* For each doubleword element of the source vector, an 8-bit x 8-bit
1700        * matrix transpose is performed. */
1701       Iop_PwBitMtxXpose64x2,
1702 
1703       /* ABSOLUTE VALUE */
1704       Iop_Abs8x16, Iop_Abs16x8, Iop_Abs32x4, Iop_Abs64x2,
1705 
1706       /* AVERAGING: note: (arg1 + arg2 + 1) >>u 1 */
1707       Iop_Avg8Ux16, Iop_Avg16Ux8, Iop_Avg32Ux4, Iop_Avg64Ux2,
1708       Iop_Avg8Sx16, Iop_Avg16Sx8, Iop_Avg32Sx4, Iop_Avg64Sx2,
1709 
1710       /* MIN/MAX */
1711       Iop_Max8Sx16, Iop_Max16Sx8, Iop_Max32Sx4, Iop_Max64Sx2,
1712       Iop_Max8Ux16, Iop_Max16Ux8, Iop_Max32Ux4, Iop_Max64Ux2,
1713       Iop_Min8Sx16, Iop_Min16Sx8, Iop_Min32Sx4, Iop_Min64Sx2,
1714       Iop_Min8Ux16, Iop_Min16Ux8, Iop_Min32Ux4, Iop_Min64Ux2,
1715 
1716       /* COMPARISON */
1717       Iop_CmpEQ8x16,  Iop_CmpEQ16x8,  Iop_CmpEQ32x4,  Iop_CmpEQ64x2,
1718       Iop_CmpGT8Sx16, Iop_CmpGT16Sx8, Iop_CmpGT32Sx4, Iop_CmpGT64Sx2,
1719       Iop_CmpGT8Ux16, Iop_CmpGT16Ux8, Iop_CmpGT32Ux4, Iop_CmpGT64Ux2,
1720 
1721       /* COUNT ones / leading zeroes / leading sign bits (not including topmost
1722          bit) */
1723       Iop_Cnt8x16,
1724       Iop_Clz8x16, Iop_Clz16x8, Iop_Clz32x4,
1725       Iop_Cls8x16, Iop_Cls16x8, Iop_Cls32x4,
1726 
1727       /* VECTOR x SCALAR SHIFT (shift amt :: Ity_I8) */
1728       Iop_ShlN8x16, Iop_ShlN16x8, Iop_ShlN32x4, Iop_ShlN64x2,
1729       Iop_ShrN8x16, Iop_ShrN16x8, Iop_ShrN32x4, Iop_ShrN64x2,
1730       Iop_SarN8x16, Iop_SarN16x8, Iop_SarN32x4, Iop_SarN64x2,
1731 
1732       /* VECTOR x VECTOR SHIFT / ROTATE */
1733       /* FIXME: I'm pretty sure the ARM32 front/back ends interpret these
1734          differently from all other targets.  The intention is that
1735          the shift amount (2nd arg) is interpreted as unsigned and
1736          only the lowest log2(lane-bits) bits are relevant.  But the
1737          ARM32 versions treat the shift amount as an 8 bit signed
1738          number.  The ARM32 uses should be replaced by the relevant
1739          vector x vector bidirectional shifts instead. */
1740       Iop_Shl8x16, Iop_Shl16x8, Iop_Shl32x4, Iop_Shl64x2,
1741       Iop_Shr8x16, Iop_Shr16x8, Iop_Shr32x4, Iop_Shr64x2,
1742       Iop_Sar8x16, Iop_Sar16x8, Iop_Sar32x4, Iop_Sar64x2,
1743       Iop_Sal8x16, Iop_Sal16x8, Iop_Sal32x4, Iop_Sal64x2,
1744       Iop_Rol8x16, Iop_Rol16x8, Iop_Rol32x4, Iop_Rol64x2,
1745 
1746       /* VECTOR x VECTOR SATURATING SHIFT */
1747       Iop_QShl8x16, Iop_QShl16x8, Iop_QShl32x4, Iop_QShl64x2,
1748       Iop_QSal8x16, Iop_QSal16x8, Iop_QSal32x4, Iop_QSal64x2,
1749       /* VECTOR x INTEGER SATURATING SHIFT */
1750       Iop_QShlNsatSU8x16, Iop_QShlNsatSU16x8,
1751       Iop_QShlNsatSU32x4, Iop_QShlNsatSU64x2,
1752       Iop_QShlNsatUU8x16, Iop_QShlNsatUU16x8,
1753       Iop_QShlNsatUU32x4, Iop_QShlNsatUU64x2,
1754       Iop_QShlNsatSS8x16, Iop_QShlNsatSS16x8,
1755       Iop_QShlNsatSS32x4, Iop_QShlNsatSS64x2,
1756 
1757       /* VECTOR x VECTOR BIDIRECTIONAL SATURATING (& MAYBE ROUNDING) SHIFT */
1758       /* All of type (V128, V128) -> V256. */
1759       /* The least significant 8 bits of each lane of the second
1760          operand are used as the shift amount, and interpreted signedly.
1761          Positive values mean a shift left, negative a shift right.  The
1762          result is signedly or unsignedly saturated.  There are also
1763          rounding variants, which add 2^(shift_amount-1) to the value before
1764          shifting, but only in the shift-right case.  Vacated positions
1765          are filled with zeroes.  IOW, it's either SHR or SHL, but not SAR.
1766 
1767          These operations return 129 bits: one bit ("Q") indicating whether
1768          saturation occurred, and the shift result.  The result type is V256,
1769          of which the lower V128 is the shift result, and Q occupies the
1770          least significant bit of the upper V128.  All other bits of the
1771          upper V128 are zero. */
1772       // Unsigned saturation, no rounding
1773       Iop_QandUQsh8x16, Iop_QandUQsh16x8,
1774       Iop_QandUQsh32x4, Iop_QandUQsh64x2,
1775       // Signed saturation, no rounding
1776       Iop_QandSQsh8x16, Iop_QandSQsh16x8,
1777       Iop_QandSQsh32x4, Iop_QandSQsh64x2,
1778 
1779       // Unsigned saturation, rounding
1780       Iop_QandUQRsh8x16, Iop_QandUQRsh16x8,
1781       Iop_QandUQRsh32x4, Iop_QandUQRsh64x2,
1782       // Signed saturation, rounding
1783       Iop_QandSQRsh8x16, Iop_QandSQRsh16x8,
1784       Iop_QandSQRsh32x4, Iop_QandSQRsh64x2,
1785 
1786       /* VECTOR x VECTOR BIDIRECTIONAL (& MAYBE ROUNDING) SHIFT */
1787       /* All of type (V128, V128) -> V128 */
1788       /* The least significant 8 bits of each lane of the second
1789          operand are used as the shift amount, and interpreted signedly.
1790          Positive values mean a shift left, negative a shift right.
1791          There are also rounding variants, which add 2^(shift_amount-1)
1792          to the value before shifting, but only in the shift-right case.
1793 
1794          For left shifts, the vacated places are filled with zeroes.
1795          For right shifts, the vacated places are filled with zeroes
1796          for the U variants and sign bits for the S variants. */
1797       // Signed and unsigned, non-rounding
1798       Iop_Sh8Sx16, Iop_Sh16Sx8, Iop_Sh32Sx4, Iop_Sh64Sx2,
1799       Iop_Sh8Ux16, Iop_Sh16Ux8, Iop_Sh32Ux4, Iop_Sh64Ux2,
1800 
1801       // Signed and unsigned, rounding
1802       Iop_Rsh8Sx16, Iop_Rsh16Sx8, Iop_Rsh32Sx4, Iop_Rsh64Sx2,
1803       Iop_Rsh8Ux16, Iop_Rsh16Ux8, Iop_Rsh32Ux4, Iop_Rsh64Ux2,
1804 
1805       /* The least significant 8 bits of each lane of the second
1806          operand are used as the shift amount, and interpreted signedly.
1807          Positive values mean a shift left, negative a shift right.  The
1808          result is signedly or unsignedly saturated.  There are also
1809          rounding variants, which add 2^(shift_amount-1) to the value before
1810          shifting, but only in the shift-right case.  Vacated positions
1811          are filled with zeroes.  IOW, it's either SHR or SHL, but not SAR.
1812       */
1813 
1814       /* VECTOR x SCALAR SATURATING (& MAYBE ROUNDING) NARROWING SHIFT RIGHT */
1815       /* All of type (V128, I8) -> V128 */
1816       /* The first argument is shifted right, then narrowed to half the width
1817          by saturating it.  The second argument is a scalar shift amount that
1818          applies to all lanes, and must be a value in the range 1 to lane_width.
1819          The shift may be done signedly (Sar variants) or unsignedly (Shr
1820          variants).  The saturation is done according to the two signedness
1821          indicators at the end of the name.  For example 64Sto32U means a
1822          signed 64 bit value is saturated into an unsigned 32 bit value.
1823          Additionally, the QRS variants do rounding, that is, they add the
1824          value (1 << (shift_amount-1)) to each source lane before shifting.
1825 
1826          These operations return 65 bits: one bit ("Q") indicating whether
1827          saturation occurred, and the shift result.  The result type is V128,
1828          of which the lower half is the shift result, and Q occupies the
1829          least significant bit of the upper half.  All other bits of the
1830          upper half are zero. */
1831       // No rounding, sat U->U
1832       Iop_QandQShrNnarrow16Uto8Ux8,
1833       Iop_QandQShrNnarrow32Uto16Ux4, Iop_QandQShrNnarrow64Uto32Ux2,
1834       // No rounding, sat S->S
1835       Iop_QandQSarNnarrow16Sto8Sx8,
1836       Iop_QandQSarNnarrow32Sto16Sx4, Iop_QandQSarNnarrow64Sto32Sx2,
1837       // No rounding, sat S->U
1838       Iop_QandQSarNnarrow16Sto8Ux8,
1839       Iop_QandQSarNnarrow32Sto16Ux4, Iop_QandQSarNnarrow64Sto32Ux2,
1840 
1841       // Rounding, sat U->U
1842       Iop_QandQRShrNnarrow16Uto8Ux8,
1843       Iop_QandQRShrNnarrow32Uto16Ux4, Iop_QandQRShrNnarrow64Uto32Ux2,
1844       // Rounding, sat S->S
1845       Iop_QandQRSarNnarrow16Sto8Sx8,
1846       Iop_QandQRSarNnarrow32Sto16Sx4, Iop_QandQRSarNnarrow64Sto32Sx2,
1847       // Rounding, sat S->U
1848       Iop_QandQRSarNnarrow16Sto8Ux8,
1849       Iop_QandQRSarNnarrow32Sto16Ux4, Iop_QandQRSarNnarrow64Sto32Ux2,
1850 
1851       /* NARROWING (binary) 
1852          -- narrow 2xV128 into 1xV128, hi half from left arg */
1853       /* See comments above w.r.t. U vs S issues in saturated narrowing. */
1854       Iop_QNarrowBin16Sto8Ux16, Iop_QNarrowBin32Sto16Ux8,
1855       Iop_QNarrowBin16Sto8Sx16, Iop_QNarrowBin32Sto16Sx8,
1856       Iop_QNarrowBin16Uto8Ux16, Iop_QNarrowBin32Uto16Ux8,
1857       Iop_NarrowBin16to8x16, Iop_NarrowBin32to16x8,
1858       Iop_QNarrowBin64Sto32Sx4, Iop_QNarrowBin64Uto32Ux4,
1859       Iop_NarrowBin64to32x4,
1860 
1861       /* NARROWING (unary) -- narrow V128 into I64 */
1862       Iop_NarrowUn16to8x8, Iop_NarrowUn32to16x4, Iop_NarrowUn64to32x2,
1863       /* Saturating narrowing from signed source to signed/unsigned
1864          destination */
1865       Iop_QNarrowUn16Sto8Sx8, Iop_QNarrowUn32Sto16Sx4, Iop_QNarrowUn64Sto32Sx2,
1866       Iop_QNarrowUn16Sto8Ux8, Iop_QNarrowUn32Sto16Ux4, Iop_QNarrowUn64Sto32Ux2,
1867       /* Saturating narrowing from unsigned source to unsigned destination */
1868       Iop_QNarrowUn16Uto8Ux8, Iop_QNarrowUn32Uto16Ux4, Iop_QNarrowUn64Uto32Ux2,
1869 
1870       /* WIDENING -- sign or zero extend each element of the argument
1871          vector to the twice original size.  The resulting vector consists of
1872          the same number of elements but each element and the vector itself
1873          are twice as wide.
1874          All operations are I64->V128.
1875          Example
1876             Iop_Widen32Sto64x2( [a, b] ) = [c, d]
1877                where c = Iop_32Sto64(a) and d = Iop_32Sto64(b) */
1878       Iop_Widen8Uto16x8, Iop_Widen16Uto32x4, Iop_Widen32Uto64x2,
1879       Iop_Widen8Sto16x8, Iop_Widen16Sto32x4, Iop_Widen32Sto64x2,
1880 
1881       /* INTERLEAVING */
1882       /* Interleave lanes from low or high halves of
1883          operands.  Most-significant result lane is from the left
1884          arg. */
1885       Iop_InterleaveHI8x16, Iop_InterleaveHI16x8,
1886       Iop_InterleaveHI32x4, Iop_InterleaveHI64x2,
1887       Iop_InterleaveLO8x16, Iop_InterleaveLO16x8,
1888       Iop_InterleaveLO32x4, Iop_InterleaveLO64x2,
1889       /* Interleave odd/even lanes of operands.  Most-significant result lane
1890          is from the left arg. */
1891       Iop_InterleaveOddLanes8x16, Iop_InterleaveEvenLanes8x16,
1892       Iop_InterleaveOddLanes16x8, Iop_InterleaveEvenLanes16x8,
1893       Iop_InterleaveOddLanes32x4, Iop_InterleaveEvenLanes32x4,
1894 
1895       /* Pack even/odd lanes. */
1896       Iop_PackOddLanes8x16, Iop_PackEvenLanes8x16,
1897       Iop_PackOddLanes16x8, Iop_PackEvenLanes16x8,
1898       Iop_PackOddLanes32x4, Iop_PackEvenLanes32x4,
1899 
1900       /* CONCATENATION -- build a new value by concatenating either
1901          the even or odd lanes of both operands.  Note that
1902          Cat{Odd,Even}Lanes64x2 are identical to Interleave{HI,LO}64x2
1903          and so are omitted. */
1904       Iop_CatOddLanes8x16, Iop_CatOddLanes16x8, Iop_CatOddLanes32x4,
1905       Iop_CatEvenLanes8x16, Iop_CatEvenLanes16x8, Iop_CatEvenLanes32x4,
1906 
1907       /* GET elements of VECTOR
1908          GET is binop (V128, I8) -> I<elem_size>
1909          SET is triop (V128, I8, I<elem_size>) -> V128 */
1910       /* Note: the arm back-end handles only constant second argument. */
1911       Iop_GetElem8x16, Iop_GetElem16x8, Iop_GetElem32x4, Iop_GetElem64x2,
1912       Iop_SetElem8x16, Iop_SetElem16x8, Iop_SetElem32x4, Iop_SetElem64x2,
1913 
1914       /* DUPLICATING -- copy value to all lanes */
1915       Iop_Dup8x16,   Iop_Dup16x8,   Iop_Dup32x4,
1916 
1917       /* SLICE -- produces the lowest 128 bits of (arg1:arg2) >> (8 * arg3).
1918          arg3 is a shift amount in bytes and may be between 0 and 16
1919          inclusive.  When 0, the result is arg2; when 16, the result is arg1.
1920          Not all back ends handle all values.  The arm64 back
1921          end handles only immediate arg3 values. */
1922       Iop_SliceV128,  // (V128, V128, I8) -> V128
1923 
1924       /* REVERSE the order of chunks in vector lanes.  Chunks must be
1925          smaller than the vector lanes (obviously) and so may be 8-,
1926          16- and 32-bit in size.  See definitions of 64-bit SIMD
1927          versions above for examples. */
1928       Iop_Reverse8sIn16_x8,
1929       Iop_Reverse8sIn32_x4, Iop_Reverse16sIn32_x4,
1930       Iop_Reverse8sIn64_x2, Iop_Reverse16sIn64_x2, Iop_Reverse32sIn64_x2,
1931       Iop_Reverse1sIn8_x16, /* Reverse bits in each byte lane. */
1932 
1933       /* PERMUTING -- copy src bytes to dst,
1934          as indexed by control vector bytes:
1935             for i in 0 .. 15 . result[i] = argL[ argR[i] ] 
1936          argR[i] values may only be in the range 0 .. 15, else behaviour
1937          is undefined.  That is, argR[i][7:4] must be zero. */
1938       Iop_Perm8x16,
1939       Iop_Perm32x4, /* ditto, except argR values are restricted to 0 .. 3 */
1940 
1941       /* PERMUTING with optional zeroing:
1942             for i in 0 .. 15 . result[i] = if argR[i] bit 7 is set
1943                                            then zero else argL[ argR[i] ]
1944          argR[i][6:4] must be zero, else behaviour is undefined.
1945       */
1946       Iop_PermOrZero8x16,
1947 
1948       /* same, but Triop (argL consists of two 128-bit parts) */
1949       /* correct range for argR values is 0..31 */
1950       /* (V128, V128, V128) -> V128 */
1951       /* (ArgL_first, ArgL_second, ArgR) -> result */
1952       Iop_Perm8x16x2,
1953 
1954       /* MISC CONVERSION -- get high bits of each byte lane, a la
1955          x86/amd64 pmovmskb */
1956       Iop_GetMSBs8x16, /* V128 -> I16 */
1957 
1958       /* Vector Reciprocal Estimate and Vector Reciprocal Square Root Estimate
1959          See floating-point equivalents for details. */
1960       Iop_RecipEst32Ux4, Iop_RSqrtEst32Ux4,
1961 
1962       /* 128-bit multiply by 10 instruction, result is lower 128-bits */
1963       Iop_MulI128by10,
1964 
1965       /* 128-bit multiply by 10 instruction, result is carry out from the MSB */
1966       Iop_MulI128by10Carry,
1967 
1968       /* 128-bit multiply by 10 instruction, result is lower 128-bits of the
1969        * source times 10 plus the carry in
1970        */
1971       Iop_MulI128by10E,
1972 
1973       /* 128-bit multiply by 10 instruction, result is carry out from the MSB
1974        * of the source times 10 plus the carry in
1975        */
1976       Iop_MulI128by10ECarry,
1977 
1978      /* 128-bit carry out from ((U64 * U64 -> U128) + (U64 * U64 -> U128)) */
1979       Iop_2xMultU64Add128CarryOut,
1980 
1981       /* ------------------ 256-bit SIMD Integer. ------------------ */
1982 
1983       /* Pack/unpack */
1984       Iop_V256to64_0,  // V256 -> I64, extract least significant lane
1985       Iop_V256to64_1,
1986       Iop_V256to64_2,
1987       Iop_V256to64_3,  // V256 -> I64, extract most significant lane
1988 
1989       Iop_64x4toV256,  // (I64,I64,I64,I64)->V256
1990                        // first arg is most significant lane
1991 
1992       Iop_V256toV128_0, // V256 -> V128, less significant lane
1993       Iop_V256toV128_1, // V256 -> V128, more significant lane
1994       Iop_V128HLtoV256, // (V128,V128)->V256, first arg is most signif
1995 
1996       Iop_AndV256,
1997       Iop_OrV256,
1998       Iop_XorV256,
1999       Iop_NotV256,
2000 
2001       /* MISC (vector integer cmp != 0) */
2002       Iop_CmpNEZ8x32, Iop_CmpNEZ16x16, Iop_CmpNEZ32x8, Iop_CmpNEZ64x4,
2003 
2004       Iop_Add8x32,    Iop_Add16x16,    Iop_Add32x8,    Iop_Add64x4,
2005       Iop_Sub8x32,    Iop_Sub16x16,    Iop_Sub32x8,    Iop_Sub64x4,
2006 
2007       Iop_CmpEQ8x32,  Iop_CmpEQ16x16,  Iop_CmpEQ32x8,  Iop_CmpEQ64x4,
2008       Iop_CmpGT8Sx32, Iop_CmpGT16Sx16, Iop_CmpGT32Sx8, Iop_CmpGT64Sx4,
2009 
2010       Iop_ShlN16x16, Iop_ShlN32x8, Iop_ShlN64x4,
2011       Iop_ShrN16x16, Iop_ShrN32x8, Iop_ShrN64x4,
2012       Iop_SarN16x16, Iop_SarN32x8,
2013 
2014       Iop_Max8Sx32, Iop_Max16Sx16, Iop_Max32Sx8,
2015       Iop_Max8Ux32, Iop_Max16Ux16, Iop_Max32Ux8,
2016       Iop_Min8Sx32, Iop_Min16Sx16, Iop_Min32Sx8,
2017       Iop_Min8Ux32, Iop_Min16Ux16, Iop_Min32Ux8,
2018 
2019       Iop_Mul16x16, Iop_Mul32x8,
2020       Iop_MulHi16Ux16, Iop_MulHi16Sx16,
2021 
2022       Iop_QAdd8Ux32, Iop_QAdd16Ux16,
2023       Iop_QAdd8Sx32, Iop_QAdd16Sx16,
2024       Iop_QSub8Ux32, Iop_QSub16Ux16,
2025       Iop_QSub8Sx32, Iop_QSub16Sx16,
2026 
2027       Iop_Avg8Ux32, Iop_Avg16Ux16,
2028 
2029       Iop_Perm32x8,
2030 
2031       /* (V128, V128) -> V128 */
2032       Iop_CipherV128, Iop_CipherLV128, Iop_CipherSV128,
2033       Iop_NCipherV128, Iop_NCipherLV128,
2034 
2035       /* Hash instructions, Federal Information Processing Standards
2036        * Publication 180-3 Secure Hash Standard. */
2037       /* (V128, I8) -> V128; The I8 input arg is (ST | SIX), where ST and
2038        * SIX are fields from the insn. See ISA 2.07 description of
2039        * vshasigmad and vshasigmaw insns.*/
2040       Iop_SHA512, Iop_SHA256,
2041 
2042       /* ------------------ 256-bit SIMD FP. ------------------ */
2043 
2044       /* ternary :: IRRoundingMode(I32) x V256 x V256 -> V256 */
2045       Iop_Add64Fx4, Iop_Sub64Fx4, Iop_Mul64Fx4, Iop_Div64Fx4,
2046       Iop_Add32Fx8, Iop_Sub32Fx8, Iop_Mul32Fx8, Iop_Div32Fx8,
2047 
2048       Iop_I32StoF32x8, /* IRRoundingMode(I32) x V256 -> V256 */
2049       Iop_F32toI32Sx8, /* IRRoundingMode(I32) x V256 -> V256 */
2050 
2051       Iop_F32toF16x8,  /* IRRoundingMode(I32) x V256 -> V128 */
2052       Iop_F16toF32x8,  /* F16x8(==V128) -> F32x8(==V256) */
2053 
2054       Iop_Sqrt32Fx8,
2055       Iop_Sqrt64Fx4,
2056       Iop_RSqrtEst32Fx8,
2057       Iop_RecipEst32Fx8,
2058 
2059       Iop_Max32Fx8, Iop_Min32Fx8,
2060       Iop_Max64Fx4, Iop_Min64Fx4,
2061       Iop_Rotx32, Iop_Rotx64,
2062       Iop_LAST      /* must be the last enumerator */
2063    }
2064    IROp;
2065 
2066 /* Pretty-print an op. */
2067 extern void ppIROp ( IROp );
2068 
2069 /* For a given primop, return the type of its result and the types of its
     arguments, written through the five OUT pointers (t_dst for the result,
     t_arg1..t_arg4 for the arguments).  NOTE(review): what is written for
     argument slots beyond the op's arity is not visible here -- presumably
     an invalid/placeholder type; confirm against the implementation. */
2070 extern void typeOfPrimop ( IROp op,
2071                            /*OUTs*/ IRType* t_dst, IRType* t_arg1,
2072                            IRType* t_arg2, IRType* t_arg3, IRType* t_arg4 );
2073 
2074 /* Might the given primop trap (eg, attempt integer division by zero)?  If in
2075    doubt returns True.  However, the vast majority of primops will never
2076    trap. */
2077 extern Bool primopMightTrap ( IROp op );
2078 
2079 /* Encoding of IEEE754-specified rounding modes.
2080    Note, various front and back ends rely on the actual numerical
2081    values of these, so do not change them.
     NOTE(review): the first four values appear to match the common
     hardware rounding-control encoding (nearest / down / up / toward
     zero); confirm against the individual front ends before relying
     on that correspondence. */
2082 typedef
2083    enum { 
2084       Irrm_NEAREST              = 0,  // Round to nearest, ties to even
                                          // (IEEE-754 roundTiesToEven)
2085       Irrm_NegINF               = 1,  // Round to negative infinity
2086       Irrm_PosINF               = 2,  // Round to positive infinity
2087       Irrm_ZERO                 = 3,  // Round toward zero (truncate)
2088       Irrm_NEAREST_TIE_AWAY_0   = 4,  // Round to nearest, ties away from 0
                                          // (IEEE-754 roundTiesToAway)
2089       Irrm_PREPARE_SHORTER      = 5,  // Round to prepare for shorter 
2090                                       // precision
2091       Irrm_AWAY_FROM_ZERO       = 6,  // Round to away from 0
2092       Irrm_NEAREST_TIE_TOWARD_0 = 7   // Round to nearest, ties towards 0
2093    }
2094    IRRoundingMode;
2095 
2096 /* Binary floating point comparison result values.
2097    This is also derived from what IA32 does.
     NOTE(review): the values look like the x86 EFLAGS (ZF|PF|CF)
     patterns produced by FCOMI/COMISx after an FP compare -- confirm
     before depending on that interpretation. */
2098 typedef
2099    enum {
2100       Ircr_UN = 0x45,   // unordered (at least one operand is a NaN)
2101       Ircr_LT = 0x01,   // left operand compares less than right
2102       Ircr_GT = 0x00,   // left operand compares greater than right
2103       Ircr_EQ = 0x40    // operands compare equal
2104    }
2105    IRCmpFResult;
2106 
     /* Width-specific aliases; all share the same encoding. */
2107 typedef IRCmpFResult IRCmpF32Result;
2108 typedef IRCmpFResult IRCmpF64Result;
2109 typedef IRCmpFResult IRCmpF128Result;
2110 
2111 /* Decimal floating point result values.  Same encoding as the binary
     FP results above. */
2112 typedef IRCmpFResult IRCmpDResult;
2113 typedef IRCmpDResult IRCmpD64Result;
2114 typedef IRCmpDResult IRCmpD128Result;
2115 
2116 /* ------------------ Expressions ------------------ */
2117 
2118 typedef struct _IRQop   IRQop;   /* forward declaration */
2119 typedef struct _IRTriop IRTriop; /* forward declaration */
2120 
2121 
2122 /* The different kinds of expressions.  Their meaning is explained below
2123    in the comments for IRExpr. */
2124 typedef
2125    enum { 
2126       Iex_Binder=0x1900, // pattern-matching binder; used only inside Vex
2127       Iex_Get,     // read guest register at a fixed offset
2128       Iex_GetI,    // read guest register at a non-fixed (run-time) offset
2129       Iex_RdTmp,   // read an IR temporary
2130       Iex_Qop,     // quaternary (4-argument) operation
2131       Iex_Triop,   // ternary operation
2132       Iex_Binop,   // binary operation
2133       Iex_Unop,    // unary operation
2134       Iex_Load,    // load from memory
2135       Iex_Const,   // constant-valued expression
2136       Iex_ITE,     // if-then-else selection
2137       Iex_CCall,   // call to a C helper function
2138       Iex_VECRET,  // NOTE(review): presumably marks a vector return slot
                       // in CCall argument lists -- confirm below/elsewhere
2139       Iex_GSPTR    // NOTE(review): presumably denotes the guest state
                       // pointer -- confirm below/elsewhere
2140    }
2141    IRExprTag;
2142 
2143 /* An expression.  Stored as a tagged union.  'tag' indicates what kind
2144    of expression this is.  'Iex' is the union that holds the fields.  If
2145    an IRExpr 'e' has e.tag equal to Iex_Load, then it's a load
2146    expression, and the fields can be accessed with
2147    'e.Iex.Load.<fieldname>'.
2148 
2149    For each kind of expression, we show what it looks like when
2150    pretty-printed with ppIRExpr().
2151 */
2152 typedef
2153    struct _IRExpr
2154    IRExpr;
2155 
2156 struct _IRExpr {
2157    IRExprTag tag;
2158    union {
2159       /* Used only in pattern matching within Vex.  Should not be seen
2160          outside of Vex. */
2161       struct {
2162          Int binder;
2163       } Binder;
2164 
2165       /* Read a guest register, at a fixed offset in the guest state.
2166          ppIRExpr output: GET:<ty>(<offset>), eg. GET:I32(0)
2167       */
2168       struct {
2169          Int    offset;    /* Offset into the guest state */
2170          IRType ty;        /* Type of the value being read */
2171       } Get;
2172 
2173       /* Read a guest register at a non-fixed offset in the guest
2174          state.  This allows circular indexing into parts of the guest
2175          state, which is essential for modelling situations where the
2176          identity of guest registers is not known until run time.  One
2177          example is the x87 FP register stack.
2178 
2179          The part of the guest state to be treated as a circular array
2180          is described in the IRRegArray 'descr' field.  It holds the
2181          offset of the first element in the array, the type of each
2182          element, and the number of elements.
2183 
2184          The array index is indicated rather indirectly, in a way
2185          which makes optimisation easy: as the sum of variable part
2186          (the 'ix' field) and a constant offset (the 'bias' field).
2187 
2188          Since the indexing is circular, the actual array index to use
2189          is computed as (ix + bias) % num-of-elems-in-the-array.
2190 
2191          Here's an example.  The description
2192 
2193             (96:8xF64)[t39,-7]
2194 
2195          describes an array of 8 F64-typed values, the
2196          guest-state-offset of the first being 96.  This array is
2197          being indexed at (t39 - 7) % 8.
2198 
2199          It is important to get the array size/type exactly correct
2200          since IR optimisation looks closely at such info in order to
2201          establish aliasing/non-aliasing between seperate GetI and
2202          PutI events, which is used to establish when they can be
2203          reordered, etc.  Putting incorrect info in will lead to
2204          obscure IR optimisation bugs.
2205 
2206             ppIRExpr output: GETI<descr>[<ix>,<bias]
2207                          eg. GETI(128:8xI8)[t1,0]
2208       */
2209       struct {
2210          IRRegArray* descr; /* Part of guest state treated as circular */
2211          IRExpr*     ix;    /* Variable part of index into array */
2212          Int         bias;  /* Constant offset part of index into array */
2213       } GetI;
2214 
2215       /* The value held by a temporary.
2216          ppIRExpr output: t<tmp>, eg. t1
2217       */
2218       struct {
2219          IRTemp tmp;       /* The temporary number */
2220       } RdTmp;
2221 
2222       /* A quaternary operation.
2223          ppIRExpr output: <op>(<arg1>, <arg2>, <arg3>, <arg4>),
2224                       eg. MAddF64r32(t1, t2, t3, t4)
2225       */
2226       struct {
2227         IRQop* details;
2228       } Qop;
2229 
2230       /* A ternary operation.
2231          ppIRExpr output: <op>(<arg1>, <arg2>, <arg3>),
2232                       eg. MulF64(1, 2.0, 3.0)
2233       */
2234       struct {
2235         IRTriop* details;
2236       } Triop;
2237 
2238       /* A binary operation.
2239          ppIRExpr output: <op>(<arg1>, <arg2>), eg. Add32(t1,t2)
2240       */
2241       struct {
2242          IROp op;          /* op-code   */
2243          IRExpr* arg1;     /* operand 1 */
2244          IRExpr* arg2;     /* operand 2 */
2245       } Binop;
2246 
2247       /* A unary operation.
2248          ppIRExpr output: <op>(<arg>), eg. Neg8(t1)
2249       */
2250       struct {
2251          IROp    op;       /* op-code */
2252          IRExpr* arg;      /* operand */
2253       } Unop;
2254 
2255       /* A load from memory -- a normal load, not a load-linked.
2256          Load-Linkeds (and Store-Conditionals) are instead represented
2257          by IRStmt.LLSC since Load-Linkeds have side effects and so
2258          are not semantically valid IRExpr's.
2259          ppIRExpr output: LD<end>:<ty>(<addr>), eg. LDle:I32(t1)
2260       */
2261       struct {
2262          IREndness end;    /* Endian-ness of the load */
2263          IRType    ty;     /* Type of the loaded value */
2264          IRExpr*   addr;   /* Address being loaded from */
2265       } Load;
2266 
2267       /* A constant-valued expression.
2268          ppIRExpr output: <con>, eg. 0x4:I32
2269       */
2270       struct {
2271          IRConst* con;     /* The constant itself */
2272       } Const;
2273 
2274       /* A call to a pure (no side-effects) helper C function.
2275 
2276          With the 'cee' field, 'name' is the function's name.  It is
2277          only used for pretty-printing purposes.  The address to call
2278          (host address, of course) is stored in the 'addr' field
2279          inside 'cee'.
2280 
2281          The 'args' field is a NULL-terminated array of arguments.
2282          The stated return IRType, and the implied argument types,
2283          must match that of the function being called well enough so
2284          that the back end can actually generate correct code for the
2285          call.
2286 
2287          The called function **must** satisfy the following:
2288 
2289          * no side effects -- must be a pure function, the result of
2290            which depends only on the passed parameters.
2291 
2292          * it may not look at, nor modify, any of the guest state
2293            since that would hide guest state transitions from
2294            instrumenters
2295 
2296          * it may not access guest memory, since that would hide
2297            guest memory transactions from the instrumenters
2298 
2299          * it must not assume that arguments are being evaluated in a
2300            particular order. The oder of evaluation is unspecified.
2301 
2302          This is restrictive, but makes the semantics clean, and does
2303          not interfere with IR optimisation.
2304 
2305          If you want to call a helper which can mess with guest state
2306          and/or memory, instead use Ist_Dirty.  This is a lot more
2307          flexible, but you have to give a bunch of details about what
2308          the helper does (and you better be telling the truth,
2309          otherwise any derived instrumentation will be wrong).  Also
2310          Ist_Dirty inhibits various IR optimisations and so can cause
2311          quite poor code to be generated.  Try to avoid it.
2312 
2313          In principle it would be allowable to have the arg vector
2314          contain an IRExpr_VECRET(), although not IRExpr_GSPTR(). However,
2315          at the moment there is no requirement for clean helper calls to
2316          be able to return V128 or V256 values.  Hence this is not allowed.
2317 
2318          ppIRExpr output: <cee>(<args>):<retty>
2319                       eg. foo{0x80489304}(t1, t2):I32
2320       */
2321       struct {
2322          IRCallee* cee;    /* Function to call. */
2323          IRType    retty;  /* Type of return value. */
2324          IRExpr**  args;   /* Vector of argument expressions. */
2325       }  CCall;
2326 
2327       /* A ternary if-then-else operator.  It returns iftrue if cond is
2328          nonzero, iffalse otherwise.  Note that it is STRICT, ie. both
2329          iftrue and iffalse are evaluated in all cases.
2330 
2331          ppIRExpr output: ITE(<cond>,<iftrue>,<iffalse>),
2332                          eg. ITE(t6,t7,t8)
2333       */
2334       struct {
2335          IRExpr* cond;     /* Condition */
2336          IRExpr* iftrue;   /* True expression */
2337          IRExpr* iffalse;  /* False expression */
2338       } ITE;
2339    } Iex;
2340 };
2341 
/* Expression auxiliaries: a ternary expression (pointed to by
   Iex.Triop.details above). */
struct _IRTriop {
   IROp op;          /* op-code   */
   IRExpr* arg1;     /* operand 1 */
   IRExpr* arg2;     /* operand 2 */
   IRExpr* arg3;     /* operand 3 */
};
2349 
/* Expression auxiliaries: a quaternary expression (pointed to by
   Iex.Qop.details above). */
struct _IRQop {
   IROp op;          /* op-code   */
   IRExpr* arg1;     /* operand 1 */
   IRExpr* arg2;     /* operand 2 */
   IRExpr* arg3;     /* operand 3 */
   IRExpr* arg4;     /* operand 4 */
};
2358 
2359 
2360 /* Two special kinds of IRExpr, which can ONLY be used in
2361    argument lists for dirty helper calls (IRDirty.args) and in NO
2362    OTHER PLACES.  And then only in very limited ways.  */
2363 
2364 /* Denotes an argument which (in the helper) takes a pointer to a
2365    (naturally aligned) V128 or V256, into which the helper is expected
2366    to write its result.  Use of IRExpr_VECRET() is strictly
2367    controlled.  If the helper returns a V128 or V256 value then
2368    IRExpr_VECRET() must appear exactly once in the arg list, although
2369    it can appear anywhere, and the helper must have a C 'void' return
2370    type.  If the helper returns any other type, IRExpr_VECRET() may
2371    not appear in the argument list. */
2372 
/* Denotes a void* argument which is passed to the helper, which at
2374    run time will point to the thread's guest state area.  This can
2375    only appear at most once in an argument list, and it may not appear
2376    at all in argument lists for clean helper calls. */
2377 
2378 static inline Bool is_IRExpr_VECRET_or_GSPTR ( const IRExpr* e ) {
2379    return e->tag == Iex_VECRET || e->tag == Iex_GSPTR;
2380 }
2381 
2382 
/* Expression constructors.  Each builds an IRExpr of the
   correspondingly-named kind. */
extern IRExpr* IRExpr_Binder ( Int binder );
extern IRExpr* IRExpr_Get    ( Int off, IRType ty );
extern IRExpr* IRExpr_GetI   ( IRRegArray* descr, IRExpr* ix, Int bias );
extern IRExpr* IRExpr_RdTmp  ( IRTemp tmp );
extern IRExpr* IRExpr_Qop    ( IROp op, IRExpr* arg1, IRExpr* arg2, 
                                        IRExpr* arg3, IRExpr* arg4 );
extern IRExpr* IRExpr_Triop  ( IROp op, IRExpr* arg1, 
                                        IRExpr* arg2, IRExpr* arg3 );
extern IRExpr* IRExpr_Binop  ( IROp op, IRExpr* arg1, IRExpr* arg2 );
extern IRExpr* IRExpr_Unop   ( IROp op, IRExpr* arg );
extern IRExpr* IRExpr_Load   ( IREndness end, IRType ty, IRExpr* addr );
extern IRExpr* IRExpr_Const  ( IRConst* con );
extern IRExpr* IRExpr_CCall  ( IRCallee* cee, IRType retty, IRExpr** args );
extern IRExpr* IRExpr_ITE    ( IRExpr* cond, IRExpr* iftrue, IRExpr* iffalse );
extern IRExpr* IRExpr_VECRET ( void );  /* dirty-call arg lists only */
extern IRExpr* IRExpr_GSPTR  ( void );  /* dirty-call arg lists only */

/* Deep-copy an IRExpr. */
extern IRExpr* deepCopyIRExpr ( const IRExpr* );

/* Pretty-print an IRExpr. */
extern void ppIRExpr ( const IRExpr* );
2406 
/* NULL-terminated IRExpr vector constructors, suitable for
   use as arg lists in clean/dirty helper calls.  The suffix is the
   number of elements (the trailing NULL is added for you). */
extern IRExpr** mkIRExprVec_0 ( void );
extern IRExpr** mkIRExprVec_1 ( IRExpr* );
extern IRExpr** mkIRExprVec_2 ( IRExpr*, IRExpr* );
extern IRExpr** mkIRExprVec_3 ( IRExpr*, IRExpr*, IRExpr* );
extern IRExpr** mkIRExprVec_4 ( IRExpr*, IRExpr*, IRExpr*, IRExpr* );
extern IRExpr** mkIRExprVec_5 ( IRExpr*, IRExpr*, IRExpr*, IRExpr*,
                                IRExpr* );
extern IRExpr** mkIRExprVec_6 ( IRExpr*, IRExpr*, IRExpr*, IRExpr*,
                                IRExpr*, IRExpr* );
extern IRExpr** mkIRExprVec_7 ( IRExpr*, IRExpr*, IRExpr*, IRExpr*,
                                IRExpr*, IRExpr*, IRExpr* );
extern IRExpr** mkIRExprVec_8 ( IRExpr*, IRExpr*, IRExpr*, IRExpr*,
                                IRExpr*, IRExpr*, IRExpr*, IRExpr* );
extern IRExpr** mkIRExprVec_9 ( IRExpr*, IRExpr*, IRExpr*, IRExpr*,
                                IRExpr*, IRExpr*, IRExpr*, IRExpr*, IRExpr* );
extern IRExpr** mkIRExprVec_13 ( IRExpr*, IRExpr*, IRExpr*, IRExpr*,
                                 IRExpr*, IRExpr*, IRExpr*, IRExpr*,
                                 IRExpr*, IRExpr*, IRExpr*, IRExpr*, IRExpr* );

/* IRExpr vector copiers:
   - shallowCopy: shallow-copy (ie. create a new vector that shares the
     elements with the original).
   - deepCopy: deep-copy (ie. create a completely new vector). */
extern IRExpr** shallowCopyIRExprVec ( IRExpr** );
extern IRExpr** deepCopyIRExprVec ( IRExpr *const * );
2434 
/* Make a constant expression from the given host word taking into
   account (of course) the host word size. */
extern IRExpr* mkIRExpr_HWord ( HWord );

/* Convenience function for constructing clean helper calls: a
   suitable IRCallee is built from 'regparms', 'name' and 'addr'. */
extern 
IRExpr* mkIRExprCCall ( IRType retty,
                        Int regparms, const HChar* name, void* addr, 
                        IRExpr** args );
2444 
2445 
2446 /* Convenience functions for atoms (IRExprs which are either Iex_Tmp or
2447  * Iex_Const). */
2448 static inline Bool isIRAtom ( const IRExpr* e ) {
2449    return e->tag == Iex_RdTmp || e->tag == Iex_Const;
2450 }
2451 
/* Are these two IR atoms (see isIRAtom above) identical?  Causes an
   assertion failure if they are passed non-atoms. */
extern Bool eqIRAtom ( const IRExpr*, const IRExpr* );
2455 
2456 
2457 /* ------------------ Jump kinds ------------------ */
2458 
2459 /* This describes hints which can be passed to the dispatcher at guest
2460    control-flow transfer points.
2461 
2462    Re Ijk_InvalICache and Ijk_FlushDCache: the guest state _must_ have
2463    two pseudo-registers, guest_CMSTART and guest_CMLEN, which specify
2464    the start and length of the region to be invalidated.  CM stands
2465    for "Cache Management".  These are both the size of a guest word.
2466    It is the responsibility of the relevant toIR.c to ensure that
2467    these are filled in with suitable values before issuing a jump of
2468    kind Ijk_InvalICache or Ijk_FlushDCache.
2469 
2470    Ijk_InvalICache requests invalidation of translations taken from
2471    the requested range.  Ijk_FlushDCache requests flushing of the D
2472    cache for the specified range.
2473 
2474    Re Ijk_EmWarn and Ijk_EmFail: the guest state must have a
2475    pseudo-register guest_EMNOTE, which is 32-bits regardless of the
2476    host or guest word size.  That register should be made to hold a
2477    VexEmNote value to indicate the reason for the exit.
2478 
2479    In the case of Ijk_EmFail, the exit is fatal (Vex-generated code
2480    cannot continue) and so the jump destination can be anything.
2481 
2482    Re Ijk_Sys_ (syscall jumps): the guest state must have a
2483    pseudo-register guest_IP_AT_SYSCALL, which is the size of a guest
2484    word.  Front ends should set this to be the IP at the most recently
2485    executed kernel-entering (system call) instruction.  This makes it
2486    very much easier (viz, actually possible at all) to back up the
2487    guest to restart a syscall that has been interrupted by a signal.
2488 
2489    Re Ijk_Extension: the guest state must have the pseudo-register
2490    guest_IP_AT_SYSCALL, which is also used for Ijk_Sys_*.  Front ends
2491    must set this to the current instruction address before jumping to
2492    an extension handler.
2493 */
typedef
   enum {
      Ijk_INVALID=0x1A00, 
      Ijk_Boring,         /* not interesting; just goto next */
      Ijk_Call,           /* guest is doing a call */
      Ijk_Ret,            /* guest is doing a return */
      Ijk_ClientReq,      /* do guest client req before continuing */
      Ijk_Yield,          /* client is yielding to thread scheduler */
      Ijk_EmWarn,         /* report emulation warning before continuing */
      Ijk_EmFail,         /* emulation critical (FATAL) error; give up */
      Ijk_NoDecode,       /* current instruction cannot be decoded */
      Ijk_MapFail,        /* Vex-provided address translation failed */
      Ijk_InvalICache,    /* Inval icache for range [CMSTART, +CMLEN) */
      Ijk_FlushDCache,    /* Flush dcache for range [CMSTART, +CMLEN) */
      Ijk_NoRedir,        /* Jump to un-redirected guest addr */
      Ijk_SigILL,         /* current instruction synths SIGILL */
      Ijk_SigTRAP,        /* current instruction synths SIGTRAP */
      Ijk_SigSEGV,        /* current instruction synths SIGSEGV */
      Ijk_SigBUS,         /* current instruction synths SIGBUS */
      Ijk_SigFPE,         /* current instruction synths generic SIGFPE */
      Ijk_SigFPE_IntDiv,  /* current instruction synths SIGFPE - IntDiv */
      Ijk_SigFPE_IntOvf,  /* current instruction synths SIGFPE - IntOvf */
      /* Unfortunately, various guest-dependent syscall kinds.  They
         all mean: do a syscall before continuing. */
      Ijk_Sys_syscall,    /* amd64/x86 'syscall', ppc 'sc', arm 'svc #0' */
      Ijk_Sys_int32,      /* amd64/x86 'int $0x20' */
      Ijk_Sys_int128,     /* amd64/x86 'int $0x80' */
      Ijk_Sys_int129,     /* amd64/x86 'int $0x81' */
      Ijk_Sys_int130,     /* amd64/x86 'int $0x82' */
      Ijk_Sys_int145,     /* amd64/x86 'int $0x91' */
      Ijk_Sys_int210,     /* amd64/x86 'int $0xD2' */
      Ijk_Sys_sysenter,   /* x86 'sysenter'.  guest_EIP becomes
                             invalid at the point this happens. */
      Ijk_Extension,      /* invoke guest-specific extension */
   }
   IRJumpKind;

/* Pretty-print an IRJumpKind. */
extern void ppIRJumpKind ( IRJumpKind );
2532 
2533 
2534 /* ------------------ Dirty helper calls ------------------ */
2535 
2536 /* A dirty call is a flexible mechanism for calling (possibly
2537    conditionally) a helper function or procedure.  The helper function
2538    may read, write or modify client memory, and may read, write or
2539    modify client state.  It can take arguments and optionally return a
2540    value.  It may return different results and/or do different things
2541    when called repeatedly with the same arguments, by means of storing
2542    private state.
2543 
2544    If a value is returned, it is assigned to the nominated return
2545    temporary.
2546 
2547    Dirty calls are statements rather than expressions for obvious
2548    reasons.  If a dirty call is marked as writing guest state, any
2549    pre-existing values derived from the written parts of the guest
2550    state are invalid.  Similarly, if the dirty call is stated as
2551    writing memory, any pre-existing loaded values are invalidated by
2552    it.
2553 
2554    In order that instrumentation is possible, the call must state, and
2555    state correctly:
2556 
2557    * Whether it reads, writes or modifies memory, and if so where.
2558 
2559    * Whether it reads, writes or modifies guest state, and if so which
2560      pieces.  Several pieces may be stated, and their extents must be
2561      known at translation-time.  Each piece is allowed to repeat some
2562      number of times at a fixed interval, if required.
2563 
2564    Normally, code is generated to pass just the args to the helper.
2565    However, if IRExpr_GSPTR() is present in the argument list (at most
2566    one instance is allowed), then the guest state pointer is passed for
2567    that arg, so that the callee can access the guest state.  It is
2568    invalid for .nFxState to be zero but IRExpr_GSPTR() to be present,
2569    since .nFxState==0 is a claim that the call does not access guest
2570    state.
2571 
2572    IMPORTANT NOTE re GUARDS: Dirty calls are strict, very strict.  The
2573    arguments and 'mFx' are evaluated REGARDLESS of the guard value.
2574    The order of argument evaluation is unspecified.  The guard
2575    expression is evaluated AFTER the arguments and 'mFx' have been
2576    evaluated.  'mFx' is expected (by Memcheck) to be a defined value
2577    even if the guard evaluates to false.
2578 */
2579 
/* Max number of guest-state effect descriptors (fxState entries) an
   IRDirty can carry. */
#define VEX_N_FXSTATE  7   /* enough for FXSAVE/FXRSTOR on x86 */

/* Effects on resources (eg. registers, memory locations) */
typedef
   enum {
      Ifx_None=0x1B00,      /* no effect */
      Ifx_Read,             /* reads the resource */
      Ifx_Write,            /* writes the resource */
      Ifx_Modify,           /* modifies the resource */
   }
   IREffect;

/* Pretty-print an IREffect */
extern void ppIREffect ( IREffect );
2594 
typedef
   struct _IRDirty {
      /* What to call, and details of args/results.  .guard must be
         non-NULL.  If .tmp is not IRTemp_INVALID, then the call
         returns a result which is placed in .tmp.  If at runtime the
         guard evaluates to false, .tmp has an 0x555..555 bit pattern
         written to it.  Hence conditional calls that assign .tmp are
         allowed. */
      IRCallee* cee;    /* where to call */
      IRExpr*   guard;  /* :: Ity_Bit.  Controls whether call happens */
      /* The args vector may contain IRExpr_GSPTR() and/or
         IRExpr_VECRET(), in both cases, at most once. */
      IRExpr**  args;   /* arg vector, ends in NULL. */
      IRTemp    tmp;    /* to assign result to, or IRTemp_INVALID if none */

      /* Mem effects; we allow only one R/W/M region to be stated */
      IREffect  mFx;    /* indicates memory effects, if any */
      IRExpr*   mAddr;  /* of access, or NULL if mFx==Ifx_None */
      Int       mSize;  /* of access, or zero if mFx==Ifx_None */

      /* Guest state effects; up to N allowed */
      Int  nFxState; /* must be 0 .. VEX_N_FXSTATE */
      struct {
         IREffect fx:16;   /* read, write or modify?  Ifx_None is invalid. */
         UShort   offset;  /* guest state offset of first access */
         UShort   size;    /* size of each access, in bytes */
         UChar    nRepeats;  /* number of extra accesses; 0 = just one */
         UChar    repeatLen; /* stride between repeated accesses */
      } fxState[VEX_N_FXSTATE];
      /* The access can be repeated, as specified by nRepeats and
         repeatLen.  To describe only a single access, nRepeats and
         repeatLen should be zero.  Otherwise, repeatLen must be a
         multiple of size and greater than size. */
      /* Overall, the parts of the guest state denoted by (offset,
         size, nRepeats, repeatLen) are
            [offset, +size)
         and, if nRepeats > 0,
            for (i = 1; i <= nRepeats; i++)
               [offset + i * repeatLen, +size)
         A convenient way to enumerate all segments is therefore
            for (i = 0; i < 1 + nRepeats; i++)
               [offset + i * repeatLen, +size)
      */
   }
   IRDirty;
2640 
/* Pretty-print a dirty call */
extern void     ppIRDirty ( const IRDirty* );

/* Allocate an uninitialised dirty call; caller must fill in all
   fields described above. */
extern IRDirty* emptyIRDirty ( void );

/* Deep-copy a dirty call */
extern IRDirty* deepCopyIRDirty ( const IRDirty* );

/* A handy function which takes some of the tedium out of constructing
   dirty helper calls.  The called function impliedly does not return
   any value and has a constant-True guard.  The call is marked as
   accessing neither guest state nor memory (hence the "unsafe"
   designation) -- you can change this marking later if need be.  A
   suitable IRCallee is constructed from the supplied bits. */
extern 
IRDirty* unsafeIRDirty_0_N ( Int regparms, const HChar* name, void* addr, 
                             IRExpr** args );

/* Similarly, make a zero-annotation dirty call which returns a value,
   and assign that to the given temp. */
extern 
IRDirty* unsafeIRDirty_1_N ( IRTemp dst, 
                             Int regparms, const HChar* name, void* addr, 
                             IRExpr** args );
2666 
2667 
2668 /* --------------- Memory Bus Events --------------- */
2669 
typedef
   enum { 
      Imbe_Fence=0x1C00,  /* memory fence */
      /* Needed only on ARM.  It cancels a reservation made by a
         preceding Linked-Load, and needs to be handed through to the
         back end, just as LL and SC themselves are. */
      Imbe_CancelReservation
   }
   IRMBusEvent;

/* Pretty-print an IRMBusEvent. */
extern void ppIRMBusEvent ( IRMBusEvent );
2681 
2682 
2683 /* --------------- Compare and Swap --------------- */
2684 
2685 /* This denotes an atomic compare and swap operation, either
2686    a single-element one or a double-element one.
2687 
2688    In the single-element case:
2689 
2690      .addr is the memory address.
2691      .end  is the endianness with which memory is accessed
2692 
2693      If .addr contains the same value as .expdLo, then .dataLo is
2694      written there, else there is no write.  In both cases, the
2695      original value at .addr is copied into .oldLo.
2696 
2697      Types: .expdLo, .dataLo and .oldLo must all have the same type.
2698      It may be any integral type, viz: I8, I16, I32 or, for 64-bit
2699      guests, I64.
2700 
2701      .oldHi must be IRTemp_INVALID, and .expdHi and .dataHi must
2702      be NULL.
2703 
2704    In the double-element case:
2705 
2706      .addr is the memory address.
2707      .end  is the endianness with which memory is accessed
2708 
2709      The operation is the same:
2710 
2711      If .addr contains the same value as .expdHi:.expdLo, then
2712      .dataHi:.dataLo is written there, else there is no write.  In
2713      both cases the original value at .addr is copied into
2714      .oldHi:.oldLo.
2715 
2716      Types: .expdHi, .expdLo, .dataHi, .dataLo, .oldHi, .oldLo must
2717      all have the same type, which may be any integral type, viz: I8,
2718      I16, I32 or, for 64-bit guests, I64.
2719 
2720      The double-element case is complicated by the issue of
2721      endianness.  In all cases, the two elements are understood to be
2722      located adjacently in memory, starting at the address .addr.
2723 
2724        If .end is Iend_LE, then the .xxxLo component is at the lower
2725        address and the .xxxHi component is at the higher address, and
2726        each component is itself stored little-endianly.
2727 
2728        If .end is Iend_BE, then the .xxxHi component is at the lower
2729        address and the .xxxLo component is at the higher address, and
2730        each component is itself stored big-endianly.
2731 
2732    This allows representing more cases than most architectures can
2733    handle.  For example, x86 cannot do DCAS on 8- or 16-bit elements.
2734 
2735    How to know if the CAS succeeded?
2736 
2737    * if .oldLo == .expdLo (resp. .oldHi:.oldLo == .expdHi:.expdLo),
2738      then the CAS succeeded, .dataLo (resp. .dataHi:.dataLo) is now
2739      stored at .addr, and the original value there was .oldLo (resp
2740      .oldHi:.oldLo).
2741 
2742    * if .oldLo != .expdLo (resp. .oldHi:.oldLo != .expdHi:.expdLo),
2743      then the CAS failed, and the original value at .addr was .oldLo
2744      (resp. .oldHi:.oldLo).
2745 
2746    Hence it is easy to know whether or not the CAS succeeded.
2747 */
/* See the large comment above for the full single/double-element
   semantics.  In the single-element case, oldHi is IRTemp_INVALID and
   expdHi/dataHi are NULL. */
typedef
   struct {
      IRTemp    oldHi;  /* old value of *addr is written here */
      IRTemp    oldLo;
      IREndness end;    /* endianness of the data in memory */
      IRExpr*   addr;   /* store address */
      IRExpr*   expdHi; /* expected old value at *addr */
      IRExpr*   expdLo;
      IRExpr*   dataHi; /* new value for *addr */
      IRExpr*   dataLo;
   }
   IRCAS;

/* Pretty-print an IRCAS. */
extern void ppIRCAS ( const IRCAS* cas );

/* Construct an IRCAS from the given pieces. */
extern IRCAS* mkIRCAS ( IRTemp oldHi, IRTemp oldLo,
                        IREndness end, IRExpr* addr, 
                        IRExpr* expdHi, IRExpr* expdLo,
                        IRExpr* dataHi, IRExpr* dataLo );

/* Deep-copy an IRCAS. */
extern IRCAS* deepCopyIRCAS ( const IRCAS* );
2769 
2770 
2771 /* ------------------ Circular Array Put ------------------ */
2772 
/* Describes a write to a circularly-indexed slice of the guest state.
   The descr/ix/bias index semantics are exactly as for Iex_GetI
   (see above). */
typedef
   struct {
      IRRegArray* descr; /* Part of guest state treated as circular */
      IRExpr*     ix;    /* Variable part of index into array */
      Int         bias;  /* Constant offset part of index into array */
      IRExpr*     data;  /* The value to write */
   } IRPutI;

/* Pretty-print an IRPutI. */
extern void ppIRPutI ( const IRPutI* puti );

/* Construct an IRPutI from the given pieces. */
extern IRPutI* mkIRPutI ( IRRegArray* descr, IRExpr* ix,
                          Int bias, IRExpr* data );

/* Deep-copy an IRPutI. */
extern IRPutI* deepCopyIRPutI ( const IRPutI* );
2787 
2788 
2789 /* --------------- Guarded loads and stores --------------- */
2790 
2791 /* Conditional stores are straightforward.  They are the same as
2792    normal stores, with an extra 'guard' field :: Ity_I1 that
2793    determines whether or not the store actually happens.  If not,
2794    memory is unmodified.
2795 
2796    The semantics of this is that 'addr' and 'data' are fully evaluated
2797    even in the case where 'guard' evaluates to zero (false).
2798 */
/* A guarded store; see the strictness note above: addr and data are
   evaluated even when the guard is false. */
typedef
   struct {
      IREndness end;    /* Endianness of the store */
      IRExpr*   addr;   /* store address */
      IRExpr*   data;   /* value to write */
      IRExpr*   guard;  /* Guarding value :: Ity_I1; store happens iff nonzero */
   }
   IRStoreG;
2807 
2808 /* Conditional loads are a little more complex.  'addr' is the
2809    address, 'guard' is the guarding condition.  If the load takes
2810    place, the loaded value is placed in 'dst'.  If it does not take
2811    place, 'alt' is copied to 'dst'.  However, the loaded value is not
2812    placed directly in 'dst' -- it is first subjected to the conversion
2813    specified by 'cvt'.
2814 
2815    For example, imagine doing a conditional 8-bit load, in which the
2816    loaded value is zero extended to 32 bits.  Hence:
2817    * 'dst' and 'alt' must have type I32
2818    * 'cvt' must be a unary op which converts I8 to I32.  In this 
2819      example, it would be ILGop_8Uto32.
2820 
2821    There is no explicit indication of the type at which the load is
2822    done, since that is inferrable from the arg type of 'cvt'.  Note
2823    that the types of 'alt' and 'dst' and the result type of 'cvt' must
2824    all be the same.
2825 
2826    Semantically, 'addr' is evaluated even in the case where 'guard'
2827    evaluates to zero (false), and 'alt' is evaluated even when 'guard'
2828    evaluates to one (true).  That is, 'addr' and 'alt' are always
2829    evaluated.
2830 */
/* Conversions applicable to guarded loads.  The type at which the
   load is done is implied by the conversion's argument type (see the
   comment above). */
typedef
   enum {
      ILGop_INVALID=0x1D00,
      ILGop_IdentV128, /* 128 bit vector, no conversion */
      ILGop_Ident64,   /* 64 bit, no conversion */
      ILGop_Ident32,   /* 32 bit, no conversion */
      ILGop_16Uto32,   /* 16 bit load, Z-widen to 32 */
      ILGop_16Sto32,   /* 16 bit load, S-widen to 32 */
      ILGop_8Uto32,    /* 8 bit load, Z-widen to 32 */
      ILGop_8Sto32     /* 8 bit load, S-widen to 32 */
   }
   IRLoadGOp;
2843 
/* A guarded load; see the strictness note above: addr and alt are
   always evaluated regardless of the guard. */
typedef
   struct {
      IREndness end;    /* Endianness of the load */
      IRLoadGOp cvt;    /* Conversion to apply to the loaded value */
      IRTemp    dst;    /* Destination (LHS) of assignment */
      IRExpr*   addr;   /* Address being loaded from */
      IRExpr*   alt;    /* Value if load is not done. */
      IRExpr*   guard;  /* Guarding value; load happens iff nonzero */
   }
   IRLoadG;
2854 
/* Pretty-print a guarded store. */
extern void ppIRStoreG ( const IRStoreG* sg );

/* Pretty-print a guarded-load conversion op. */
extern void ppIRLoadGOp ( IRLoadGOp cvt );

/* Pretty-print a guarded load. */
extern void ppIRLoadG ( const IRLoadG* lg );

/* Construct an IRStoreG from the given pieces. */
extern IRStoreG* mkIRStoreG ( IREndness end,
                              IRExpr* addr, IRExpr* data,
                              IRExpr* guard );

/* Construct an IRLoadG from the given pieces. */
extern IRLoadG* mkIRLoadG ( IREndness end, IRLoadGOp cvt,
                            IRTemp dst, IRExpr* addr, IRExpr* alt, 
                            IRExpr* guard );
2868 
2869 
/* ------------------ Statements ------------------ */

/* The different kinds of statements.  Their meaning is explained
   below in the comments for IRStmt.

   Those marked META do not represent code, but rather extra
   information about the code.  These statements can be removed
   without affecting the functional behaviour of the code, however
   they are required by some IR consumers such as tools that
   instrument the code.
*/

typedef
   enum {
      Ist_NoOp=0x1E00,
      Ist_IMark,     /* META */
      Ist_AbiHint,   /* META */
      Ist_Put,
      Ist_PutI,
      Ist_WrTmp,
      Ist_Store,
      Ist_LoadG,
      Ist_StoreG,
      Ist_CAS,
      Ist_LLSC,
      Ist_Dirty,
      Ist_MBE,
      Ist_Exit
   }
   IRStmtTag;
2900 
2901 /* A statement.  Stored as a tagged union.  'tag' indicates what kind
2902    of expression this is.  'Ist' is the union that holds the fields.
2903    If an IRStmt 'st' has st.tag equal to Iex_Store, then it's a store
2904    statement, and the fields can be accessed with
2905    'st.Ist.Store.<fieldname>'.
2906 
2907    For each kind of statement, we show what it looks like when
2908    pretty-printed with ppIRStmt().
2909 */
2910 typedef
2911    struct _IRStmt {
2912       IRStmtTag tag;
2913       union {
2914          /* A no-op (usually resulting from IR optimisation).  Can be
2915             omitted without any effect.
2916 
2917             ppIRStmt output: IR-NoOp
2918          */
2919          struct {
2920      } NoOp;
2921 
2922          /* META: instruction mark.  Marks the start of the statements
2923             that represent a single machine instruction (the end of
2924             those statements is marked by the next IMark or the end of
2925             the IRSB).  Contains the address and length of the
2926             instruction.
2927 
2928             It also contains a delta value.  The delta must be
2929             subtracted from a guest program counter value before
2930             attempting to establish, by comparison with the address
2931             and length values, whether or not that program counter
2932             value refers to this instruction.  For x86, amd64, ppc32,
2933             ppc64 and arm, the delta value is zero.  For Thumb
2934             instructions, the delta value is one.  This is because, on
2935             Thumb, guest PC values (guest_R15T) are encoded using the
2936             top 31 bits of the instruction address and a 1 in the lsb;
2937             hence they appear to be (numerically) 1 past the start of
2938             the instruction they refer to.  IOW, guest_R15T on ARM
2939             holds a standard ARM interworking address.
2940 
2941             ppIRStmt output: ------ IMark(<addr>, <len>, <delta>) ------,
2942                          eg. ------ IMark(0x4000792, 5, 0) ------,
2943          */
2944          struct {
2945             Addr   addr;   /* instruction address */
2946             UInt   len;    /* instruction length */
2947             UChar  delta;  /* addr = program counter as encoded in guest state
2948                                      - delta */
2949          } IMark;
2950 
2951          /* META: An ABI hint, which says something about this
2952             platform's ABI.
2953 
2954             At the moment, the only AbiHint is one which indicates
2955             that a given chunk of address space, [base .. base+len-1],
2956             has become undefined.  This is used on amd64-linux and
2957             some ppc variants to pass stack-redzoning hints to whoever
2958             wants to see them.  It also indicates the address of the
2959             next (dynamic) instruction that will be executed.  This is
2960             to help Memcheck to origin tracking.
2961 
2962             ppIRStmt output: ====== AbiHint(<base>, <len>, <nia>) ======
2963                          eg. ====== AbiHint(t1, 16, t2) ======
2964          */
2965          struct {
2966             IRExpr* base;     /* Start  of undefined chunk */
2967             Int     len;      /* Length of undefined chunk */
2968             IRExpr* nia;      /* Address of next (guest) insn */
2969          } AbiHint;
2970 
2971          /* Write a guest register, at a fixed offset in the guest state.
2972             ppIRStmt output: PUT(<offset>) = <data>, eg. PUT(60) = t1
2973          */
2974          struct {
2975             Int     offset;   /* Offset into the guest state */
2976             IRExpr* data;     /* The value to write */
2977          } Put;
2978 
2979          /* Write a guest register, at a non-fixed offset in the guest
2980             state.  See the comment for GetI expressions for more
2981             information.
2982 
2983             ppIRStmt output: PUTI<descr>[<ix>,<bias>] = <data>,
2984                          eg. PUTI(64:8xF64)[t5,0] = t1
2985          */
2986          struct {
2987             IRPutI* details;
2988          } PutI;
2989 
2990          /* Assign a value to a temporary.  Note that SSA rules require
2991             each tmp is only assigned to once.  IR sanity checking will
2992             reject any block containing a temporary which is not assigned
2993             to exactly once.
2994 
2995             ppIRStmt output: t<tmp> = <data>, eg. t1 = 3
2996          */
2997          struct {
2998             IRTemp  tmp;   /* Temporary  (LHS of assignment) */
2999             IRExpr* data;  /* Expression (RHS of assignment) */
3000          } WrTmp;
3001 
3002          /* Write a value to memory.  This is a normal store, not a
3003             Store-Conditional.  To represent a Store-Conditional,
3004             instead use IRStmt.LLSC.
3005             ppIRStmt output: ST<end>(<addr>) = <data>, eg. STle(t1) = t2
3006          */
3007          struct {
3008             IREndness end;    /* Endianness of the store */
3009             IRExpr*   addr;   /* store address */
3010             IRExpr*   data;   /* value to write */
3011          } Store;
3012 
3013          /* Guarded store.  Note that this is defined to evaluate all
3014             expression fields (addr, data) even if the guard evaluates
3015             to false.
3016             ppIRStmt output:
3017               if (<guard>) ST<end>(<addr>) = <data> */
3018          struct {
3019             IRStoreG* details;
3020          } StoreG;
3021 
3022          /* Guarded load.  Note that this is defined to evaluate all
3023             expression fields (addr, alt) even if the guard evaluates
3024             to false.
3025             ppIRStmt output:
3026               t<tmp> = if (<guard>) <cvt>(LD<end>(<addr>)) else <alt> */
3027          struct {
3028             IRLoadG* details;
3029          } LoadG;
3030 
3031          /* Do an atomic compare-and-swap operation.  Semantics are
3032             described above on a comment at the definition of IRCAS.
3033 
3034             ppIRStmt output:
3035                t<tmp> = CAS<end>(<addr> :: <expected> -> <new>)
3036             eg
3037                t1 = CASle(t2 :: t3->Add32(t3,1))
3038                which denotes a 32-bit atomic increment 
3039                of a value at address t2
3040 
3041             A double-element CAS may also be denoted, in which case <tmp>,
3042             <expected> and <new> are all pairs of items, separated by
3043             commas.
3044          */
3045          struct {
3046             IRCAS* details;
3047          } CAS;
3048 
3049          /* Either Load-Linked or Store-Conditional, depending on
3050             STOREDATA.
3051 
3052             If STOREDATA is NULL then this is a Load-Linked, meaning
3053             that data is loaded from memory as normal, but a
3054             'reservation' for the address is also lodged in the
3055             hardware.
3056 
3057                result = Load-Linked(addr, end)
3058 
3059             The data transfer type is the type of RESULT (I32, I64,
3060             etc).  ppIRStmt output:
3061 
3062                result = LD<end>-Linked(<addr>), eg. LDbe-Linked(t1)
3063 
3064             If STOREDATA is not NULL then this is a Store-Conditional,
3065             hence:
3066 
3067                result = Store-Conditional(addr, storedata, end)
3068 
3069             The data transfer type is the type of STOREDATA and RESULT
3070             has type Ity_I1. The store may fail or succeed depending
3071             on the state of a previously lodged reservation on this
3072             address.  RESULT is written 1 if the store succeeds and 0
3073             if it fails.  eg ppIRStmt output:
3074 
3075                result = ( ST<end>-Cond(<addr>) = <storedata> )
3076                eg t3 = ( STbe-Cond(t1, t2) )
3077 
3078             In all cases, the address must be naturally aligned for
3079             the transfer type -- any misaligned addresses should be
3080             caught by a dominating IR check and side exit.  This
3081             alignment restriction exists because on at least some
3082             LL/SC platforms (ppc), stwcx. etc will trap w/ SIGBUS on
3083             misaligned addresses, and we have to actually generate
3084             stwcx. on the host, and we don't want it trapping on the
3085             host.
3086 
3087             Summary of rules for transfer type:
3088               STOREDATA == NULL (LL):
3089                 transfer type = type of RESULT
3090               STOREDATA != NULL (SC):
3091                 transfer type = type of STOREDATA, and RESULT :: Ity_I1
3092          */
3093          struct {
3094             IREndness end;
3095             IRTemp    result;
3096             IRExpr*   addr;
3097             IRExpr*   storedata; /* NULL => LL, non-NULL => SC */
3098          } LLSC;
3099 
3100          /* Call (possibly conditionally) a C function that has side
3101             effects (ie. is "dirty").  See the comments above the
3102             IRDirty type declaration for more information.
3103 
3104             ppIRStmt output:
3105                t<tmp> = DIRTY <guard> <effects> 
3106                   ::: <callee>(<args>)
3107             eg.
3108                t1 = DIRTY t27 RdFX-gst(16,4) RdFX-gst(60,4)
3109                      ::: foo{0x380035f4}(t2)
3110          */       
3111          struct {
3112             IRDirty* details;
3113          } Dirty;
3114 
3115          /* A memory bus event - a fence, or acquisition/release of the
3116             hardware bus lock.  IR optimisation treats all these as fences
3117             across which no memory references may be moved.
3118             ppIRStmt output: MBusEvent-Fence,
3119                              MBusEvent-BusLock, MBusEvent-BusUnlock.
3120          */
3121          struct {
3122             IRMBusEvent event;
3123          } MBE;
3124 
3125          /* Conditional exit from the middle of an IRSB.
3126             ppIRStmt output: if (<guard>) goto {<jk>} <dst>
3127                          eg. if (t69) goto {Boring} 0x4000AAA:I32
3128             If <guard> is true, the guest state is also updated by
3129             PUT-ing <dst> at <offsIP>.  This is done because a
3130             taken exit must update the guest program counter.
3131          */
3132          struct {
3133             IRExpr*    guard;    /* Conditional expression */
3134             IRConst*   dst;      /* Jump target (constant only) */
3135             IRJumpKind jk;       /* Jump kind */
3136             Int        offsIP;   /* Guest state offset for IP */
3137          } Exit;
3138       } Ist;
3139    }
3140    IRStmt;
3141 
3142 /* Statement constructors. */
3143 extern IRStmt* IRStmt_NoOp    ( void );
3144 extern IRStmt* IRStmt_IMark   ( Addr addr, UInt len, UChar delta );
3145 extern IRStmt* IRStmt_AbiHint ( IRExpr* base, Int len, IRExpr* nia );
3146 extern IRStmt* IRStmt_Put     ( Int off, IRExpr* data );
3147 extern IRStmt* IRStmt_PutI    ( IRPutI* details );
3148 extern IRStmt* IRStmt_WrTmp   ( IRTemp tmp, IRExpr* data );
3149 extern IRStmt* IRStmt_Store   ( IREndness end, IRExpr* addr, IRExpr* data );
3150 extern IRStmt* IRStmt_StoreG  ( IREndness end, IRExpr* addr, IRExpr* data,
3151                                 IRExpr* guard );
3152 extern IRStmt* IRStmt_LoadG   ( IREndness end, IRLoadGOp cvt, IRTemp dst,
3153                                 IRExpr* addr, IRExpr* alt, IRExpr* guard );
3154 extern IRStmt* IRStmt_CAS     ( IRCAS* details );
3155 extern IRStmt* IRStmt_LLSC    ( IREndness end, IRTemp result,
3156                                 IRExpr* addr, IRExpr* storedata );
3157 extern IRStmt* IRStmt_Dirty   ( IRDirty* details );
3158 extern IRStmt* IRStmt_MBE     ( IRMBusEvent event );
3159 extern IRStmt* IRStmt_Exit    ( IRExpr* guard, IRJumpKind jk, IRConst* dst,
3160                                 Int offsIP );
3161 
3162 /* Deep-copy an IRStmt. */
3163 extern IRStmt* deepCopyIRStmt ( const IRStmt* );
3164 
3165 /* Pretty-print an IRStmt. */
3166 extern void ppIRStmt ( const IRStmt* );
3167 
3168 
3169 /* ------------------ Basic Blocks ------------------ */
3170 
3171 /* Type environments: a bunch of statements, expressions, etc, are
3172    incomplete without an environment indicating the type of each
3173    IRTemp.  So this provides one.  IR temporaries are really just
3174    unsigned ints and so this provides an array, 0 .. n_types_used-1 of
3175    them.
3176 */
3177 typedef
3178    struct {
3179       IRType* types;
3180       Int     types_size;
3181       Int     types_used;
3182    }
3183    IRTypeEnv;
3184 
3185 /* Obtain a new IRTemp */
3186 extern IRTemp newIRTemp ( IRTypeEnv*, IRType );
3187 
3188 /* Deep-copy a type environment */
3189 extern IRTypeEnv* deepCopyIRTypeEnv ( const IRTypeEnv* );
3190 
3191 /* Pretty-print a type environment */
3192 extern void ppIRTypeEnv ( const IRTypeEnv* );
3193 
3194 
3195 /* Code blocks, which in proper compiler terminology are superblocks
3196    (single entry, multiple exit code sequences) contain:
3197 
3198    - A table giving a type for each temp (the "type environment")
3199    - An expandable array of statements
3200    - An expression of type 32 or 64 bits, depending on the
3201      guest's word size, indicating the next destination if the block 
3202      executes all the way to the end, without a side exit
3203    - An indication of any special actions (JumpKind) needed
3204      for this final jump.
3205    - Offset of the IP field in the guest state.  This will be
3206      updated before the final jump is done.
3207    
3208    "IRSB" stands for "IR Super Block".
3209 */
3210 typedef
3211    struct {
3212       IRTypeEnv* tyenv;
3213       IRStmt**   stmts;
3214       Int        stmts_size;
3215       Int        stmts_used;
3216       IRExpr*    next;
3217       IRJumpKind jumpkind;
3218       Int        offsIP;
3219    }
3220    IRSB;
3221 
3222 /* Allocate a new, uninitialised IRSB */
3223 extern IRSB* emptyIRSB ( void );
3224 
3225 /* Deep-copy an IRSB */
3226 extern IRSB* deepCopyIRSB ( const IRSB* );
3227 
3228 /* Deep-copy an IRSB, except for the statements list, which set to be
3229    a new, empty, list of statements. */
3230 extern IRSB* deepCopyIRSBExceptStmts ( const IRSB* );
3231 
3232 /* Pretty-print an IRSB */
3233 extern void ppIRSB ( const IRSB* );
3234 
3235 /* Append an IRStmt to an IRSB */
3236 extern void addStmtToIRSB ( IRSB*, IRStmt* );
3237 
3238 
3239 /*---------------------------------------------------------------*/
3240 /*--- Helper functions for the IR                             ---*/
3241 /*---------------------------------------------------------------*/
3242 
3243 /* For messing with IR type environments */
3244 extern IRTypeEnv* emptyIRTypeEnv  ( void );
3245 
3246 /* What is the type of this expression? */
3247 extern IRType typeOfIRConst ( const IRConst* );
3248 extern IRType typeOfIRTemp  ( const IRTypeEnv*, IRTemp );
3249 extern IRType typeOfIRExpr  ( const IRTypeEnv*, const IRExpr* );
3250 
3251 /* What are the arg and result type for this IRLoadGOp? */
3252 extern void typeOfIRLoadGOp ( IRLoadGOp cvt,
3253                               /*OUT*/IRType* t_res,
3254                               /*OUT*/IRType* t_arg );
3255 
3256 /* Sanity check a BB of IR */
3257 extern void sanityCheckIRSB ( const  IRSB*  bb, 
3258                               const  HChar* caller,
3259                               Bool   require_flatness, 
3260                               IRType guest_word_size );
3261 extern Bool isFlatIRStmt ( const IRStmt* );
3262 extern Bool isFlatIRSB ( const IRSB* );
3263 
3264 /* Is this any value actually in the enumeration 'IRType' ? */
3265 extern Bool isPlausibleIRType ( IRType ty );
3266 
3267 
3268 /*---------------------------------------------------------------*/
3269 /*--- IR injection                                            ---*/
3270 /*---------------------------------------------------------------*/
3271 
3272 void vex_inject_ir(IRSB *, IREndness);
3273 
3274 
3275 #endif /* ndef __LIBVEX_IR_H */
3276 
3277 /*---------------------------------------------------------------*/
3278 /*---                                             libvex_ir.h ---*/
3279 /*---------------------------------------------------------------*/