|
|
|||
File indexing completed on 2025-12-16 10:32:57
0001 0002 /*---------------------------------------------------------------*/ 0003 /*--- begin libvex.h ---*/ 0004 /*---------------------------------------------------------------*/ 0005 0006 /* 0007 This file is part of Valgrind, a dynamic binary instrumentation 0008 framework. 0009 0010 Copyright (C) 2004-2017 OpenWorks LLP 0011 info@open-works.net 0012 0013 This program is free software; you can redistribute it and/or 0014 modify it under the terms of the GNU General Public License as 0015 published by the Free Software Foundation; either version 2 of the 0016 License, or (at your option) any later version. 0017 0018 This program is distributed in the hope that it will be useful, but 0019 WITHOUT ANY WARRANTY; without even the implied warranty of 0020 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 0021 General Public License for more details. 0022 0023 You should have received a copy of the GNU General Public License 0024 along with this program; if not, see <http://www.gnu.org/licenses/>. 0025 0026 The GNU General Public License is contained in the file COPYING. 0027 0028 Neither the names of the U.S. Department of Energy nor the 0029 University of California nor the names of its contributors may be 0030 used to endorse or promote products derived from this software 0031 without prior written permission. 0032 */ 0033 0034 #ifndef __LIBVEX_H 0035 #define __LIBVEX_H 0036 0037 0038 #include "libvex_basictypes.h" 0039 #include "libvex_ir.h" 0040 0041 0042 /*---------------------------------------------------------------*/ 0043 /*--- This file defines the top-level interface to LibVEX. 
/*-------------------------------------------------------*/
/*--- Architectures, variants, and other arch info    ---*/
/*-------------------------------------------------------*/

/* The set of guest/host architectures LibVEX can translate between. */
typedef
   enum {
      VexArch_INVALID=0x400,
      VexArchX86,
      VexArchAMD64,
      VexArchARM,
      VexArchARM64,
      VexArchPPC32,
      VexArchPPC64,
      VexArchS390X,
      VexArchMIPS32,
      VexArchMIPS64,
      VexArchNANOMIPS,
   }
   VexArch;


/* Information about endianness. */
typedef
   enum {
      VexEndness_INVALID=0x600, /* unknown endianness */
      VexEndnessLE,             /* little endian */
      VexEndnessBE              /* big endian */
   }
   VexEndness;


/* For a given architecture, these specify extra capabilities beyond
   the minimum supported (baseline) capabilities.  They may be OR'd
   together, although some combinations don't make sense.  (eg, SSE2
   but not SSE1).  LibVEX_Translate will check for nonsensical
   combinations. */

/* x86: baseline capability is Pentium-1 (FPU, MMX, but no SSE), with
   cmpxchg8b.  MMXEXT is a special AMD only subset of SSE1 (Integer SSE). */
#define VEX_HWCAPS_X86_MMXEXT  (1<<1)  /* A subset of SSE1 on early AMD */
#define VEX_HWCAPS_X86_SSE1    (1<<2)  /* SSE1 support (Pentium III) */
#define VEX_HWCAPS_X86_SSE2    (1<<3)  /* SSE2 support (Pentium 4) */
#define VEX_HWCAPS_X86_SSE3    (1<<4)  /* SSE3 support (>= Prescott) */
#define VEX_HWCAPS_X86_LZCNT   (1<<5)  /* SSE4a LZCNT insn */

/* amd64: baseline capability is SSE2, with cmpxchg8b but not
   cmpxchg16b. */
#define VEX_HWCAPS_AMD64_SSE3   (1<<5)  /* SSE3 support */
#define VEX_HWCAPS_AMD64_SSSE3  (1<<12) /* Supplemental SSE3 support */
#define VEX_HWCAPS_AMD64_CX16   (1<<6)  /* cmpxchg16b support */
#define VEX_HWCAPS_AMD64_LZCNT  (1<<7)  /* SSE4a LZCNT insn */
#define VEX_HWCAPS_AMD64_AVX    (1<<8)  /* AVX instructions */
#define VEX_HWCAPS_AMD64_RDTSCP (1<<9)  /* RDTSCP instruction */
#define VEX_HWCAPS_AMD64_BMI    (1<<10) /* BMI1 instructions */
#define VEX_HWCAPS_AMD64_AVX2   (1<<11) /* AVX2 instructions */
#define VEX_HWCAPS_AMD64_RDRAND (1<<13) /* RDRAND instructions */
#define VEX_HWCAPS_AMD64_F16C   (1<<14) /* F16C instructions */
#define VEX_HWCAPS_AMD64_RDSEED (1<<15) /* RDSEED instructions */
#define VEX_HWCAPS_AMD64_FMA3   (1<<16) /* FMA3 instructions */
#define VEX_HWCAPS_AMD64_FMA4   (1<<17) /* FMA4 instructions */

/* ppc32: baseline capability is integer only */
#define VEX_HWCAPS_PPC32_F     (1<<8)  /* basic (non-optional) FP */
#define VEX_HWCAPS_PPC32_V     (1<<9)  /* Altivec (VMX) */
#define VEX_HWCAPS_PPC32_FX    (1<<10) /* FP extns (fsqrt, fsqrts) */
#define VEX_HWCAPS_PPC32_GX    (1<<11) /* Graphics extns
                                          (fres,frsqrte,fsel,stfiwx) */
#define VEX_HWCAPS_PPC32_VX    (1<<12) /* Vector-scalar floating-point (VSX);
                                          implies ISA 2.06 or higher */
#define VEX_HWCAPS_PPC32_DFP   (1<<17) /* Decimal Floating Point (DFP) --
                                          e.g., dadd */
#define VEX_HWCAPS_PPC32_ISA2_07 (1<<19) /* ISA 2.07 -- e.g., mtvsrd */
#define VEX_HWCAPS_PPC32_ISA3_0  (1<<21) /* ISA 3.0  -- e.g., cnttzw */
#define VEX_HWCAPS_PPC32_ISA3_1  (1<<22) /* ISA 3.1  -- e.g., brh */
/* ISA 3.1 not supported in 32-bit mode */

/* ppc64: baseline capability is integer and basic FP insns */
#define VEX_HWCAPS_PPC64_V     (1<<13) /* Altivec (VMX) */
#define VEX_HWCAPS_PPC64_FX    (1<<14) /* FP extns (fsqrt, fsqrts) */
#define VEX_HWCAPS_PPC64_GX    (1<<15) /* Graphics extns
                                          (fres,frsqrte,fsel,stfiwx) */
#define VEX_HWCAPS_PPC64_VX    (1<<16) /* Vector-scalar floating-point (VSX);
                                          implies ISA 2.06 or higher */
#define VEX_HWCAPS_PPC64_DFP   (1<<18) /* Decimal Floating Point (DFP) --
                                          e.g., dadd */
#define VEX_HWCAPS_PPC64_ISA2_07 (1<<20) /* ISA 2.07 -- e.g., mtvsrd */
#define VEX_HWCAPS_PPC64_ISA3_0  (1<<22) /* ISA 3.0  -- e.g., cnttzw */
#define VEX_HWCAPS_PPC64_ISA3_1  (1<<23) /* ISA 3.1  -- e.g., brh */
#define VEX_HWCAPS_PPC64_SCV     (1<<24) /* ISA 3.0, Kernel supports scv
                                            instruction. */

/* s390x: Hardware capability encoding

   Bits [26:31] encode the machine model (see VEX_S390X_MODEL... below)
   Bits [0:20]  encode specific hardware capabilities
                (see VEX_HWAPS_S390X_... below)
*/

/* Model numbers must be assigned in chronological order.
   They are used as array index. */
#define VEX_S390X_MODEL_Z900     0
#define VEX_S390X_MODEL_Z800     1
#define VEX_S390X_MODEL_Z990     2
#define VEX_S390X_MODEL_Z890     3
#define VEX_S390X_MODEL_Z9_EC    4
#define VEX_S390X_MODEL_Z9_BC    5
#define VEX_S390X_MODEL_Z10_EC   6
#define VEX_S390X_MODEL_Z10_BC   7
#define VEX_S390X_MODEL_Z196     8
#define VEX_S390X_MODEL_Z114     9
#define VEX_S390X_MODEL_ZEC12    10
#define VEX_S390X_MODEL_ZBC12    11
#define VEX_S390X_MODEL_Z13      12
#define VEX_S390X_MODEL_Z13S     13
#define VEX_S390X_MODEL_Z14      14
#define VEX_S390X_MODEL_Z14_ZR1  15
#define VEX_S390X_MODEL_Z15      16
#define VEX_S390X_MODEL_Z16      17
#define VEX_S390X_MODEL_UNKNOWN  18     /* always last in list */
#define VEX_S390X_MODEL_MASK     0x3F

#define VEX_HWCAPS_S390X_LDISP (1<<6)   /* Long-displacement facility */
#define VEX_HWCAPS_S390X_EIMM  (1<<7)   /* Extended-immediate facility */
#define VEX_HWCAPS_S390X_GIE   (1<<8)   /* General-instruction-extension facility */
#define VEX_HWCAPS_S390X_DFP   (1<<9)   /* Decimal floating point facility */
#define VEX_HWCAPS_S390X_FGX   (1<<10)  /* FPR-GR transfer facility */
#define VEX_HWCAPS_S390X_ETF2  (1<<11)  /* ETF2-enhancement facility */
#define VEX_HWCAPS_S390X_STFLE (1<<12)  /* STFLE facility */
#define VEX_HWCAPS_S390X_ETF3  (1<<13)  /* ETF3-enhancement facility */
#define VEX_HWCAPS_S390X_STCKF (1<<14)  /* STCKF facility */
#define VEX_HWCAPS_S390X_FPEXT (1<<15)  /* Floating point extension facility */
#define VEX_HWCAPS_S390X_LSC   (1<<16)  /* Conditional load/store facility */
#define VEX_HWCAPS_S390X_PFPO  (1<<17)  /* Perform floating point ops facility */
#define VEX_HWCAPS_S390X_VX    (1<<18)  /* Vector facility */
#define VEX_HWCAPS_S390X_MSA5  (1<<19)  /* message security assistance facility */
#define VEX_HWCAPS_S390X_MI2   (1<<20)  /* miscellaneous-instruction-extensions facility 2 */
#define VEX_HWCAPS_S390X_LSC2  (1<<21)  /* Conditional load/store facility2 */
#define VEX_HWCAPS_S390X_VXE   (1<<22)  /* Vector-enhancements facility */
#define VEX_HWCAPS_S390X_NNPA  (1<<23)  /* NNPA facility */
#define VEX_HWCAPS_S390X_DFLT  (1<<24)  /* Deflate-conversion facility */

/* Special value representing all available s390x hwcaps */
#define VEX_HWCAPS_S390X_ALL   (VEX_HWCAPS_S390X_LDISP | \
                                VEX_HWCAPS_S390X_EIMM  | \
                                VEX_HWCAPS_S390X_GIE   | \
                                VEX_HWCAPS_S390X_DFP   | \
                                VEX_HWCAPS_S390X_FGX   | \
                                VEX_HWCAPS_S390X_STFLE | \
                                VEX_HWCAPS_S390X_STCKF | \
                                VEX_HWCAPS_S390X_FPEXT | \
                                VEX_HWCAPS_S390X_LSC   | \
                                VEX_HWCAPS_S390X_ETF3  | \
                                VEX_HWCAPS_S390X_ETF2  | \
                                VEX_HWCAPS_S390X_PFPO  | \
                                VEX_HWCAPS_S390X_VX    | \
                                VEX_HWCAPS_S390X_MSA5  | \
                                VEX_HWCAPS_S390X_MI2   | \
                                VEX_HWCAPS_S390X_LSC2  | \
                                VEX_HWCAPS_S390X_VXE   | \
                                VEX_HWCAPS_S390X_NNPA  | \
                                VEX_HWCAPS_S390X_DFLT)

/* Extract the capability bits / the model bits from a hwcaps word. */
#define VEX_HWCAPS_S390X(x)  ((x) & ~VEX_S390X_MODEL_MASK)
#define VEX_S390X_MODEL(x)   ((x) & VEX_S390X_MODEL_MASK)

/* arm: baseline capability is ARMv4 */
/* Bits 5:0 - architecture level (e.g. 5 for v5, 6 for v6 etc) */
#define VEX_HWCAPS_ARM_VFP    (1<<6)  /* VFP extension */
#define VEX_HWCAPS_ARM_VFP2   (1<<7)  /* VFPv2 */
#define VEX_HWCAPS_ARM_VFP3   (1<<8)  /* VFPv3 */
/* Bits 15:10 reserved for (possible) future VFP revisions */
#define VEX_HWCAPS_ARM_NEON   (1<<16) /* Advanced SIMD also known as NEON */

/* Get an ARM architecure level from HWCAPS */
#define VEX_ARM_ARCHLEVEL(x) ((x) & 0x3f)

/* ARM64: baseline capability is AArch64 v8. */
#define VEX_HWCAPS_ARM64_FHM       (1 << 4)
#define VEX_HWCAPS_ARM64_DPBCVAP   (1 << 5)
#define VEX_HWCAPS_ARM64_DPBCVADP  (1 << 6)
#define VEX_HWCAPS_ARM64_SM3       (1 << 7)
#define VEX_HWCAPS_ARM64_SM4       (1 << 8)
#define VEX_HWCAPS_ARM64_SHA3      (1 << 9)
#define VEX_HWCAPS_ARM64_RDM       (1 << 10)
#define VEX_HWCAPS_ARM64_ATOMICS   (1 << 11)
#define VEX_HWCAPS_ARM64_I8MM      (1 << 12)
#define VEX_HWCAPS_ARM64_BF16      (1 << 13)
#define VEX_HWCAPS_ARM64_FP16      (1 << 14)
#define VEX_HWCAPS_ARM64_VFP16     (1 << 15)

/* MIPS baseline capability */
/* Assigned Company values for bits 23:16 of the PRId Register
   (CP0 register 15, select 0).  As of the MIPS32 and MIPS64 specs from
   MTI, the PRId register is defined in this (backwards compatible)
   way:

  +----------------+----------------+----------------+----------------+
  | Company Options| Company ID     | Processor ID   | Revision       |
  +----------------+----------------+----------------+----------------+
   31            24 23            16 15             8 7

*/

#define VEX_PRID_COMP_LEGACY      0x00000000
#define VEX_PRID_COMP_MIPS        0x00010000
#define VEX_PRID_COMP_BROADCOM    0x00020000
#define VEX_PRID_COMP_NETLOGIC    0x000C0000
#define VEX_PRID_COMP_CAVIUM      0x000D0000
#define VEX_PRID_COMP_INGENIC_E1  0x00E10000        /* JZ4780 */

/*
 * These are valid when 23:16 == PRID_COMP_LEGACY
 */
#define VEX_PRID_IMP_LOONGSON_64        0x6300  /* Loongson-2/3 */

/*
 * These are the PRID's for when 23:16 == PRID_COMP_MIPS
 */
#define VEX_PRID_IMP_34K                0x9500
#define VEX_PRID_IMP_74K                0x9700
#define VEX_PRID_IMP_P5600              0xa800

/*
 * Instead of Company Options values, bits 31:24 will be packed with
 * additional information, such as isa level and FP mode.
 */
#define VEX_MIPS_CPU_ISA_M32R1      0x01000000
#define VEX_MIPS_CPU_ISA_M32R2      0x02000000
#define VEX_MIPS_CPU_ISA_M64R1      0x04000000
#define VEX_MIPS_CPU_ISA_M64R2      0x08000000
#define VEX_MIPS_CPU_ISA_M32R6      0x10000000
#define VEX_MIPS_CPU_ISA_M64R6      0x20000000
/* FP mode is FR = 1 (32 dbl. prec. FP registers) */
#define VEX_MIPS_HOST_FR            0x40000000
/* Get MIPS Extended Information */
#define VEX_MIPS_EX_INFO(x) ((x) & 0xFF000000)
/* Get MIPS Company ID from HWCAPS */
#define VEX_MIPS_COMP_ID(x) ((x) & 0x00FF0000)
/* Get MIPS Processor ID from HWCAPS */
#define VEX_MIPS_PROC_ID(x) ((x) & 0x0000FF00)
/* Get MIPS Revision from HWCAPS */
#define VEX_MIPS_REV(x) ((x) & 0x000000FF)
/* Get host FP mode */
#define VEX_MIPS_HOST_FP_MODE(x) (!!(VEX_MIPS_EX_INFO(x) & VEX_MIPS_HOST_FR))
/* Check if the processor supports MIPS32R2. */
#define VEX_MIPS_CPU_HAS_MIPS32R2(x) (VEX_MIPS_EX_INFO(x) & \
                                      VEX_MIPS_CPU_ISA_M32R2)
/* Check if the processor supports MIPS64R2. */
#define VEX_MIPS_CPU_HAS_MIPS64R2(x) (VEX_MIPS_EX_INFO(x) & \
                                      VEX_MIPS_CPU_ISA_M64R2)
/* Check if the processor supports MIPSR6. */
#define VEX_MIPS_CPU_HAS_MIPSR6(x) (VEX_MIPS_EX_INFO(x) & \
                                    (VEX_MIPS_CPU_ISA_M32R6 | \
                                     VEX_MIPS_CPU_ISA_M64R6))
/* Check if the processor supports DSP ASE Rev 2. */
#define VEX_MIPS_PROC_DSP2(x) ((VEX_MIPS_COMP_ID(x) == VEX_PRID_COMP_MIPS) && \
                               (VEX_MIPS_PROC_ID(x) == VEX_PRID_IMP_74K))
/* Check if the processor supports DSP ASE Rev 1. */
#define VEX_MIPS_PROC_DSP(x)  (VEX_MIPS_PROC_DSP2(x) || \
                               ((VEX_MIPS_COMP_ID(x) == VEX_PRID_COMP_MIPS) && \
                                (VEX_MIPS_PROC_ID(x) == VEX_PRID_IMP_34K)))

/* Check if the processor supports MIPS MSA (SIMD)*/
#define VEX_MIPS_PROC_MSA(x) ((VEX_MIPS_COMP_ID(x) == VEX_PRID_COMP_MIPS) && \
                              (VEX_MIPS_PROC_ID(x) == VEX_PRID_IMP_P5600) && \
                              (VEX_MIPS_HOST_FP_MODE(x)))

/* These return statically allocated strings. */
*/ 0310 0311 extern const HChar* LibVEX_ppVexArch ( VexArch ); 0312 extern const HChar* LibVEX_ppVexEndness ( VexEndness endness ); 0313 extern const HChar* LibVEX_ppVexHwCaps ( VexArch, UInt ); 0314 0315 0316 /* The various kinds of caches */ 0317 typedef enum { 0318 DATA_CACHE=0x500, 0319 INSN_CACHE, 0320 UNIFIED_CACHE 0321 } VexCacheKind; 0322 0323 /* Information about a particular cache */ 0324 typedef struct { 0325 VexCacheKind kind; 0326 UInt level; /* level this cache is at, e.g. 1 for L1 cache */ 0327 UInt sizeB; /* size of this cache in bytes */ 0328 UInt line_sizeB; /* cache line size in bytes */ 0329 UInt assoc; /* set associativity */ 0330 Bool is_trace_cache; /* False, except for certain Pentium 4 models */ 0331 } VexCache; 0332 0333 /* Convenience macro to initialise a VexCache */ 0334 #define VEX_CACHE_INIT(_kind, _level, _size, _line_size, _assoc) \ 0335 ({ (VexCache) { .kind = _kind, .level = _level, .sizeB = _size, \ 0336 .line_sizeB = _line_size, .assoc = _assoc, \ 0337 .is_trace_cache = False }; }) 0338 0339 /* Information about the cache system as a whole */ 0340 typedef struct { 0341 UInt num_levels; 0342 UInt num_caches; 0343 /* Unordered array of caches for this host. NULL if there are 0344 no caches. The following can always be assumed: 0345 (1) There is at most one cache of a given kind per cache level. 0346 (2) If there exists a unified cache at a particular level then 0347 no other cache exists at that level. 0348 (3) The existence of a cache at level N > 1 implies the existence of 0349 at least one cache at level N-1. */ 0350 VexCache *caches; 0351 Bool icaches_maintain_coherence; 0352 } VexCacheInfo; 0353 0354 0355 /* This struct is a bit of a hack, but is needed to carry misc 0356 important bits of info about an arch. Fields which are meaningless 0357 or ignored for the platform in question should be set to zero. 0358 Nb: if you add fields to the struct make sure to update function 0359 LibVEX_default_VexArchInfo. 
*/ 0360 0361 typedef 0362 struct { 0363 /* The following three fields are mandatory. */ 0364 UInt hwcaps; 0365 VexEndness endness; 0366 VexCacheInfo hwcache_info; 0367 /* PPC32/PPC64 only: size of instruction cache line */ 0368 Int ppc_icache_line_szB; 0369 /* PPC32/PPC64 only: sizes zeroed by the dcbz/dcbzl instructions 0370 (bug#135264) */ 0371 UInt ppc_dcbz_szB; 0372 /* PPC32/PPC64 only: True scv is supported */ 0373 Bool ppc_scv_supported; 0374 UInt ppc_dcbzl_szB; /* 0 means unsupported (SIGILL) */ 0375 /* ARM64: I- and D- minimum line sizes in log2(bytes), as 0376 obtained from ctr_el0.DminLine and .IminLine. For example, a 0377 line size of 64 bytes would be encoded here as 6. */ 0378 UInt arm64_dMinLine_lg2_szB; 0379 UInt arm64_iMinLine_lg2_szB; 0380 UChar arm64_cache_block_size; 0381 /* ARM64: does the host require us to use the fallback LLSC 0382 implementation? */ 0383 Bool arm64_requires_fallback_LLSC; 0384 } 0385 VexArchInfo; 0386 0387 /* Write default settings info *vai. */ 0388 extern 0389 void LibVEX_default_VexArchInfo ( /*OUT*/VexArchInfo* vai ); 0390 0391 0392 /* This struct carries guest and host ABI variant information that may 0393 be needed. Fields which are meaningless or ignored for the 0394 platform in question should be set to zero. 
0395 0396 Settings which are believed to be correct are: 0397 0398 guest_stack_redzone_size 0399 guest is ppc32-linux ==> 0 0400 guest is ppc64-linux ==> 288 0401 guest is amd64-linux ==> 128 0402 guest is other ==> inapplicable 0403 0404 guest_amd64_assume_fs_is_const 0405 guest is amd64-linux ==> True 0406 guest is amd64-darwin ==> False 0407 guest is amd64-solaris ==> True 0408 guest is other ==> inapplicable 0409 0410 guest_amd64_assume_gs_is_const 0411 guest is amd64-darwin ==> True 0412 guest is amd64-linux ==> True 0413 guest is amd64-solaris ==> False 0414 guest is other ==> inapplicable 0415 0416 guest_ppc_zap_RZ_at_blr 0417 guest is ppc64-linux ==> True 0418 guest is ppc32-linux ==> False 0419 guest is other ==> inapplicable 0420 0421 guest_ppc_zap_RZ_at_bl 0422 guest is ppc64-linux ==> const True 0423 guest is ppc32-linux ==> const False 0424 guest is other ==> inapplicable 0425 0426 guest__use_fallback_LLSC 0427 guest is mips32 ==> applicable, default True 0428 guest is mips64 ==> applicable, default True 0429 guest is arm64 ==> applicable, default False 0430 0431 host_ppc_calls_use_fndescrs: 0432 host is ppc32-linux ==> False 0433 host is ppc64-linux ==> True 0434 host is other ==> inapplicable 0435 */ 0436 0437 typedef 0438 struct { 0439 /* PPC and AMD64 GUESTS only: how many bytes below the 0440 stack pointer are validly addressible? */ 0441 Int guest_stack_redzone_size; 0442 0443 /* AMD64 GUESTS only: should we translate %fs-prefixed 0444 instructions using the assumption that %fs always contains 0445 the same value? (typically zero on linux and solaris) */ 0446 Bool guest_amd64_assume_fs_is_const; 0447 0448 /* AMD64 GUESTS only: should we translate %gs-prefixed 0449 instructions using the assumption that %gs always contains 0450 the same value? (typically 0x60 on darwin)? 
*/ 0451 Bool guest_amd64_assume_gs_is_const; 0452 0453 /* AMD64 GUESTS only: for a misaligned memory access, for which we should 0454 generate a trap, should we generate SigBUS (a la FreeBSD) or SIGSEGV 0455 (Linux, OSX) ?? */ 0456 Bool guest_amd64_sigbus_on_misalign; 0457 0458 /* PPC GUESTS only: should we zap the stack red zone at a 'blr' 0459 (function return) ? */ 0460 Bool guest_ppc_zap_RZ_at_blr; 0461 0462 /* PPC GUESTS only: should we zap the stack red zone at a 'bl' 0463 (function call) ? Is supplied with the guest address of the 0464 target of the call since that may be significant. If NULL, 0465 is assumed equivalent to a fn which always returns False. */ 0466 Bool (*guest_ppc_zap_RZ_at_bl)(Addr); 0467 0468 /* Potentially for all guests that use LL/SC: use the fallback 0469 (synthesised) implementation rather than passing LL/SC on to 0470 the host? */ 0471 Bool guest__use_fallback_LLSC; 0472 0473 /* PPC32/PPC64 HOSTS only: does '&f' give us a pointer to a 0474 function descriptor on the host, or to the function code 0475 itself? True => descriptor, False => code. */ 0476 Bool host_ppc_calls_use_fndescrs; 0477 0478 /* MIPS32/MIPS64 GUESTS only: emulated FPU mode. */ 0479 UInt guest_mips_fp_mode; 0480 } 0481 VexAbiInfo; 0482 0483 /* Write default settings info *vbi. */ 0484 extern 0485 void LibVEX_default_VexAbiInfo ( /*OUT*/VexAbiInfo* vbi ); 0486 0487 0488 /*-------------------------------------------------------*/ 0489 /*--- Control of Vex's optimiser (iropt). ---*/ 0490 /*-------------------------------------------------------*/ 0491 0492 0493 /* VexRegisterUpdates specifies when to ensure that the guest state is 0494 up to date, in order of increasing accuracy but increasing expense. 0495 0496 VexRegUpdSpAtMemAccess: all registers are updated at superblock 0497 exits, and SP is also up to date at memory exception points. The 0498 SP is described by the arch specific functions 0499 guest_<arch>_state_requires_precise_mem_exns. 
0500 0501 VexRegUpdUnwindregsAtMemAccess: registers needed to make a stack 0502 trace are up to date at memory exception points. Typically, 0503 these are PC/SP/FP. The minimal registers are described by the 0504 arch specific functions guest_<arch>_state_requires_precise_mem_exns. 0505 This is what Valgrind sets as the default. 0506 0507 VexRegUpdAllregsAtMemAccess: all registers up to date at memory 0508 exception points. This is what normally might be considered as 0509 providing "precise exceptions for memory", but does not 0510 necessarily provide precise register values at any other kind of 0511 exception. 0512 0513 VexRegUpdAllregsAtEachInsn: all registers up to date at each 0514 instruction. 0515 */ 0516 typedef 0517 enum { 0518 VexRegUpd_INVALID=0x700, 0519 VexRegUpdSpAtMemAccess, 0520 VexRegUpdUnwindregsAtMemAccess, 0521 VexRegUpdAllregsAtMemAccess, 0522 VexRegUpdAllregsAtEachInsn 0523 } 0524 VexRegisterUpdates; 0525 0526 /* Control of Vex's optimiser. */ 0527 0528 typedef 0529 struct { 0530 /* Controls verbosity of iropt. 0 = no output. */ 0531 Int iropt_verbosity; 0532 /* Control aggressiveness of iropt. 0 = no opt, 1 = simple 0533 opts, 2 (default) = max optimisation. */ 0534 Int iropt_level; 0535 /* Controls when registers are updated in guest state. Note 0536 that this is the default value. The VEX client can override 0537 this on a per-IRSB basis if it wants. bb_to_IR() will query 0538 the client to ask if it wants a different setting for the 0539 block under construction, and that new setting is transported 0540 back to LibVEX_Translate, which feeds it to iropt via the 0541 various do_iropt_BB calls. */ 0542 VexRegisterUpdates iropt_register_updates_default; 0543 /* How aggressive should iropt be in unrolling loops? Higher 0544 numbers make it more enthusiastic about loop unrolling. 0545 Default=120. A setting of zero disables unrolling. */ 0546 Int iropt_unroll_thresh; 0547 /* What's the maximum basic block length the front end(s) allow? 
0548 BBs longer than this are split up. Default=60 (guest 0549 insns). */ 0550 Int guest_max_insns; 0551 /* Should Vex try to construct superblocks, by chasing unconditional 0552 branches/calls to known destinations, and performing AND/OR idiom 0553 recognition? It is recommended to set this to True as that possibly 0554 improves performance a bit, and also is important for avoiding certain 0555 kinds of false positives in Memcheck. Default=True. */ 0556 Bool guest_chase; 0557 /* Register allocator version. Allowed values are: 0558 - '2': previous, good and slow implementation. 0559 - '3': current, faster implementation; perhaps producing slightly worse 0560 spilling decisions. */ 0561 UInt regalloc_version; 0562 } 0563 VexControl; 0564 0565 0566 /* Write the default settings into *vcon. */ 0567 0568 extern 0569 void LibVEX_default_VexControl ( /*OUT*/ VexControl* vcon ); 0570 0571 0572 /*-------------------------------------------------------*/ 0573 /*--- Storage management control ---*/ 0574 /*-------------------------------------------------------*/ 0575 0576 /* Allocate in Vex's temporary allocation area. Be careful with this. 0577 You can only call it inside an instrumentation or optimisation 0578 callback that you have previously specified in a call to 0579 LibVEX_Translate. The storage allocated will only stay alive until 0580 translation of the current basic block is complete. */ 0581 extern void* LibVEX_Alloc ( SizeT nbytes ); 0582 0583 /* Show Vex allocation statistics. */ 0584 extern void LibVEX_ShowAllocStats ( void ); 0585 0586 0587 /*-------------------------------------------------------*/ 0588 /*--- Describing guest state layout ---*/ 0589 /*-------------------------------------------------------*/ 0590 0591 /* Describe the guest state enough that the instrumentation 0592 functions can work. */ 0593 0594 /* The max number of guest state chunks which we can describe as 0595 always defined (for the benefit of Memcheck). 
*/ 0596 #define VEXGLO_N_ALWAYSDEFD 24 0597 0598 typedef 0599 struct { 0600 /* Total size of the guest state, in bytes. Must be 0601 16-aligned. */ 0602 Int total_sizeB; 0603 /* Whereabouts is the stack pointer? */ 0604 Int offset_SP; 0605 Int sizeof_SP; /* 4 or 8 */ 0606 /* Whereabouts is the frame pointer? */ 0607 Int offset_FP; 0608 Int sizeof_FP; /* 4 or 8 */ 0609 /* Whereabouts is the instruction pointer? */ 0610 Int offset_IP; 0611 Int sizeof_IP; /* 4 or 8 */ 0612 /* Describe parts of the guest state regarded as 'always 0613 defined'. */ 0614 Int n_alwaysDefd; 0615 struct { 0616 Int offset; 0617 Int size; 0618 } alwaysDefd[VEXGLO_N_ALWAYSDEFD]; 0619 } 0620 VexGuestLayout; 0621 0622 /* A note about guest state layout. 0623 0624 LibVEX defines the layout for the guest state, in the file 0625 pub/libvex_guest_<arch>.h. The struct will have an 16-aligned 0626 size. Each translated bb is assumed to be entered with a specified 0627 register pointing at such a struct. Beyond that is two copies of 0628 the shadow state area with the same size as the struct. Beyond 0629 that is a spill area that LibVEX may spill into. It must have size 0630 LibVEX_N_SPILL_BYTES, and this must be a 16-aligned number. 0631 0632 On entry, the baseblock pointer register must be 16-aligned. 0633 0634 There must be no holes in between the primary guest state, its two 0635 copies, and the spill area. In short, all 4 areas must have a 0636 16-aligned size and be 16-aligned, and placed back-to-back. 0637 */ 0638 0639 #define LibVEX_N_SPILL_BYTES 4096 0640 0641 /* The size of the guest state must be a multiple of this number. */ 0642 #define LibVEX_GUEST_STATE_ALIGN 16 0643 0644 /*-------------------------------------------------------*/ 0645 /*--- Initialisation of the library ---*/ 0646 /*-------------------------------------------------------*/ 0647 0648 /* Initialise the library. You must call this first. 
*/ 0649 0650 extern void LibVEX_Init ( 0651 0652 /* failure exit function */ 0653 # if defined(__cplusplus) && defined(__GNUC__) && __GNUC__ <= 3 0654 /* g++ 3.x doesn't understand attributes on function parameters. 0655 See #265762. */ 0656 # else 0657 __attribute__ ((noreturn)) 0658 # endif 0659 void (*failure_exit) ( void ), 0660 0661 /* logging output function */ 0662 void (*log_bytes) ( const HChar*, SizeT nbytes ), 0663 0664 /* debug paranoia level */ 0665 Int debuglevel, 0666 0667 /* Control ... */ 0668 const VexControl* vcon 0669 ); 0670 0671 0672 /*-------------------------------------------------------*/ 0673 /*--- Make a translation ---*/ 0674 /*-------------------------------------------------------*/ 0675 0676 /* Describes the outcome of a translation attempt. */ 0677 typedef 0678 struct { 0679 /* overall status */ 0680 enum { VexTransOK=0x800, 0681 VexTransAccessFail, VexTransOutputFull } status; 0682 /* The number of extents that have a self-check (0 to 3) */ 0683 UInt n_sc_extents; 0684 /* Offset in generated code of the profile inc, or -1 if 0685 none. Needed for later patching. */ 0686 Int offs_profInc; 0687 /* Stats only: the number of guest insns included in the 0688 translation. It may be zero (!). */ 0689 UInt n_guest_instrs; 0690 /* Stats only: the number of unconditional branches incorporated into the 0691 trace. */ 0692 UShort n_uncond_in_trace; 0693 /* Stats only: the number of conditional branches incorporated into the 0694 trace. */ 0695 UShort n_cond_in_trace; 0696 } 0697 VexTranslateResult; 0698 0699 0700 /* Describes precisely the pieces of guest code that a translation 0701 covers. Now that Vex can chase across BB boundaries, the old 0702 scheme of describing a chunk of guest code merely by its start 0703 address and length is inadequate. 0704 0705 This struct uses 20 bytes on a 32-bit archtecture and 32 bytes on a 0706 64-bit architecture. Space is important as clients will have to store 0707 one of these for each translation made. 
0708 */ 0709 typedef 0710 struct { 0711 Addr base[3]; 0712 UShort len[3]; 0713 UShort n_used; 0714 } 0715 VexGuestExtents; 0716 0717 0718 /* A structure to carry arguments for LibVEX_Translate. There are so 0719 many of them, it seems better to have a structure. */ 0720 typedef 0721 struct { 0722 /* IN: The instruction sets we are translating from and to. And 0723 guest/host misc info. */ 0724 VexArch arch_guest; 0725 VexArchInfo archinfo_guest; 0726 VexArch arch_host; 0727 VexArchInfo archinfo_host; 0728 VexAbiInfo abiinfo_both; 0729 0730 /* IN: an opaque value which is passed as the first arg to all 0731 callback functions supplied in this struct. Vex has no idea 0732 what's at the other end of this pointer. */ 0733 void* callback_opaque; 0734 0735 /* IN: the block to translate, and its guest address. */ 0736 /* where are the actual bytes in the host's address space? */ 0737 const UChar* guest_bytes; 0738 /* where do the bytes really come from in the guest's aspace? 0739 This is the post-redirection guest address. Not that Vex 0740 understands anything about redirection; that is all done on 0741 the Valgrind side. */ 0742 Addr guest_bytes_addr; 0743 0744 /* Is it OK to chase into this guest address? May not be 0745 NULL. */ 0746 Bool (*chase_into_ok) ( /*callback_opaque*/void*, Addr ); 0747 0748 /* OUT: which bits of guest code actually got translated */ 0749 VexGuestExtents* guest_extents; 0750 0751 /* IN: a place to put the resulting code, and its size */ 0752 UChar* host_bytes; 0753 Int host_bytes_size; 0754 /* OUT: how much of the output area is used. */ 0755 Int* host_bytes_used; 0756 0757 /* IN: optionally, two instrumentation functions. May be 0758 NULL. 
*/ 0759 IRSB* (*instrument1) ( /*callback_opaque*/void*, 0760 IRSB*, 0761 const VexGuestLayout*, 0762 const VexGuestExtents*, 0763 const VexArchInfo*, 0764 IRType gWordTy, IRType hWordTy ); 0765 IRSB* (*instrument2) ( /*callback_opaque*/void*, 0766 IRSB*, 0767 const VexGuestLayout*, 0768 const VexGuestExtents*, 0769 const VexArchInfo*, 0770 IRType gWordTy, IRType hWordTy ); 0771 0772 IRSB* (*finaltidy) ( IRSB* ); 0773 0774 /* IN: a callback used to ask the caller which of the extents, 0775 if any, a self check is required for. Must not be NULL. 0776 The returned value is a bitmask with a 1 in position i indicating 0777 that the i'th extent needs a check. Since there can be at most 0778 3 extents, the returned values must be between 0 and 7. 0779 0780 This call also gives the VEX client the opportunity to change 0781 the precision of register update preservation as performed by 0782 the IR optimiser. Before the call, VEX will set *pxControl 0783 to hold the default register-update status value as specified 0784 by VexControl::iropt_register_updates_default as passed to 0785 LibVEX_Init at library initialisation time. The client (in 0786 this callback) can if it wants, inspect the value and change 0787 it to something different, and that value will be used for 0788 subsequent IR optimisation of the block. */ 0789 UInt (*needs_self_check)( /*callback_opaque*/void*, 0790 /*MAYBE_MOD*/VexRegisterUpdates* pxControl, 0791 const VexGuestExtents* ); 0792 0793 /* IN: optionally, a callback which allows the caller to add its 0794 own IR preamble following the self-check and any other 0795 VEX-generated preamble, if any. May be NULL. If non-NULL, 0796 the IRSB under construction is handed to this function, which 0797 presumably adds IR statements to it. The callback may 0798 optionally complete the block and direct bb_to_IR not to 0799 disassemble any instructions into it; this is indicated by 0800 the callback returning True. 
0801 */ 0802 Bool (*preamble_function)(/*callback_opaque*/void*, IRSB*); 0803 0804 /* IN: debug: trace vex activity at various points */ 0805 Int traceflags; 0806 0807 /* IN: debug: print diagnostics when an illegal instr is detected */ 0808 Bool sigill_diag; 0809 0810 /* IN: profiling: add a 64 bit profiler counter increment to the 0811 translation? */ 0812 Bool addProfInc; 0813 0814 /* IN: address of the dispatcher entry points. Describes the 0815 places where generated code should jump to at the end of each 0816 bb. 0817 0818 At the end of each translation, the next guest address is 0819 placed in the host's standard return register (x86: %eax, 0820 amd64: %rax, ppc32: %r3, ppc64: %r3). Optionally, the guest 0821 state pointer register (on host x86: %ebp; amd64: %rbp; 0822 ppc32/64: r31) may be set to a VEX_TRC_ value to indicate any 0823 special action required before the next block is run. 0824 0825 Control is then passed back to the dispatcher (beyond Vex's 0826 control; caller supplies this) in the following way: 0827 0828 - On host archs which lack a link register (x86, amd64), by a 0829 jump to the host address specified in 0830 'dispatcher_assisted', if the guest state pointer has been 0831 changed so as to request some action before the next block 0832 is run, or 'dispatcher_unassisted' (the fast path), in 0833 which it is assumed that the guest state pointer is 0834 unchanged and we wish to continue directly with the next 0835 translation. Both of these must be non-NULL. 0836 0837 - On host archs which have a link register (ppc32, ppc64), by 0838 a branch to the link register (which is guaranteed to be 0839 unchanged from whatever it was at entry to the 0840 translation). 'dispatch_assisted' and 0841 'dispatch_unassisted' must be NULL. 0842 0843 The aim is to get back and forth between translations and the 0844 dispatcher without creating memory traffic to store return 0845 addresses. 
0846 0847 FIXME: update this comment 0848 */ 0849 const void* disp_cp_chain_me_to_slowEP; 0850 const void* disp_cp_chain_me_to_fastEP; 0851 const void* disp_cp_xindir; 0852 const void* disp_cp_xassisted; 0853 } 0854 VexTranslateArgs; 0855 0856 0857 /* Runs the entire compilation pipeline. */ 0858 extern 0859 VexTranslateResult LibVEX_Translate ( /*MOD*/ VexTranslateArgs* ); 0860 0861 /* Runs the first half of the compilation pipeline: lifts guest code to IR, 0862 optimises, instruments and optimises it some more. */ 0863 extern 0864 IRSB* LibVEX_FrontEnd ( /*MOD*/ VexTranslateArgs*, 0865 /*OUT*/ VexTranslateResult* res, 0866 /*OUT*/ VexRegisterUpdates* pxControl ); 0867 0868 0869 /* A subtlety re interaction between self-checking translations and 0870 bb-chasing. The supplied chase_into_ok function should say NO 0871 (False) when presented with any address for which you might want to 0872 make a self-checking translation. 0873 0874 If it doesn't do that, you may end up with Vex chasing from BB #1 0875 to BB #2 (fine); but if you wanted checking for #2 and not #1, that 0876 would not be the result. Therefore chase_into_ok should disallow 0877 following into #2. That will force the caller to eventually 0878 request a new translation starting at #2, at which point Vex will 0879 correctly observe the make-a-self-check flag. 0880 0881 FIXME: is this still up to date? */ 0882 0883 0884 /*-------------------------------------------------------*/ 0885 /*--- Patch existing translations ---*/ 0886 /*-------------------------------------------------------*/ 0887 0888 /* A host address range that was modified by the functions below. 0889 Callers must request I-cache syncing after the call as appropriate. */ 0890 typedef 0891 struct { 0892 HWord start; 0893 HWord len; /* always > 0 */ 0894 } 0895 VexInvalRange; 0896 0897 /* Chain an XDirect jump located at place_to_chain so it jumps to 0898 place_to_jump_to. 
It is expected (and checked) that this site 0899 currently contains a call to the dispatcher specified by 0900 disp_cp_chain_me_EXPECTED. */ 0901 extern 0902 VexInvalRange LibVEX_Chain ( VexArch arch_host, 0903 VexEndness endhess_host, 0904 void* place_to_chain, 0905 const void* disp_cp_chain_me_EXPECTED, 0906 const void* place_to_jump_to ); 0907 0908 /* Undo an XDirect jump located at place_to_unchain, so it is 0909 converted back into a call to disp_cp_chain_me. It is expected 0910 (and checked) that this site currently contains a jump directly to 0911 the address specified by place_to_jump_to_EXPECTED. */ 0912 extern 0913 VexInvalRange LibVEX_UnChain ( VexArch arch_host, 0914 VexEndness endness_host, 0915 void* place_to_unchain, 0916 const void* place_to_jump_to_EXPECTED, 0917 const void* disp_cp_chain_me ); 0918 0919 /* Returns a constant -- the size of the event check that is put at 0920 the start of every translation. This makes it possible to 0921 calculate the fast entry point address if the slow entry point 0922 address is known (the usual case), or vice versa. */ 0923 extern 0924 Int LibVEX_evCheckSzB ( VexArch arch_host ); 0925 0926 0927 /* Patch the counter location into an existing ProfInc point. The 0928 specified point is checked to make sure it is plausible. 
*/ 0929 extern 0930 VexInvalRange LibVEX_PatchProfInc ( VexArch arch_host, 0931 VexEndness endness_host, 0932 void* place_to_patch, 0933 const ULong* location_of_counter ); 0934 0935 0936 /*-------------------------------------------------------*/ 0937 /*--- Show accumulated statistics ---*/ 0938 /*-------------------------------------------------------*/ 0939 0940 extern void LibVEX_ShowStats ( void ); 0941 0942 /*-------------------------------------------------------*/ 0943 /*-- IR injection --*/ 0944 /*-------------------------------------------------------*/ 0945 0946 /* IR Injection Control Block */ 0947 0948 #define NO_ROUNDING_MODE (~0u) 0949 0950 typedef 0951 struct { 0952 IROp op; // the operation to perform 0953 HWord result; // address of the result 0954 HWord opnd1; // address of 1st operand 0955 HWord opnd2; // address of 2nd operand 0956 HWord opnd3; // address of 3rd operand 0957 HWord opnd4; // address of 4th operand 0958 IRType t_result; // type of result 0959 IRType t_opnd1; // type of 1st operand 0960 IRType t_opnd2; // type of 2nd operand 0961 IRType t_opnd3; // type of 3rd operand 0962 IRType t_opnd4; // type of 4th operand 0963 UInt rounding_mode; 0964 UInt num_operands; // excluding rounding mode, if any 0965 /* The following two members describe if this operand has immediate 0966 * operands. There are a few restrictions: 0967 * (1) An operator can have at most one immediate operand. 0968 * (2) If there is an immediate operand, it is the right-most operand 0969 * An immediate_index of 0 means there is no immediate operand. 
0970 */ 0971 UInt immediate_type; // size of immediate Ity_I8, Ity_16 0972 UInt immediate_index; // operand number: 1, 2 0973 } 0974 IRICB; 0975 0976 extern void LibVEX_InitIRI ( const IRICB * ); 0977 0978 /*-------------------------------------------------------*/ 0979 /*--- Notes ---*/ 0980 /*-------------------------------------------------------*/ 0981 0982 /* Code generation conventions that need to be recorded somewhere. 0983 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 0984 0985 x86 0986 ~~~ 0987 Generated code should be entered using a JMP instruction. On 0988 entry, %ebp should point to the guest state, and %esp should be a 0989 valid stack pointer. The generated code may change %eax, %ebx, 0990 %ecx, %edx, %esi, %edi, all the FP registers and control state, and 0991 all the XMM registers. 0992 0993 On entry, the FPU control word should be set to 0x027F, and the SSE 0994 control word (%mxcsr) should be set to 0x1F80. On exit, they 0995 should still have those values (after masking off the lowest 6 bits 0996 of %mxcsr). If they don't, there is a bug in VEX-generated code. 0997 0998 Generated code returns to the scheduler using a JMP instruction, to 0999 the address specified in the .dispatch field of VexTranslateArgs. 1000 %eax (or %eax:%edx, if simulating a 64-bit target) will contain the 1001 guest address of the next block to execute. %ebp may be changed 1002 to a VEX_TRC_ value, otherwise it should be as it was at entry. 1003 1004 CRITICAL ISSUES in x86 code generation. The only known critical 1005 issue is that the host FPU and SSE state is not properly saved 1006 across calls to helper functions. If any helper references any 1007 such state, it is likely (1) to misbehave itself, since the FP 1008 stack tags will not be as expected, and (2) after returning to 1009 generated code, the generated code is likely to go wrong. This 1010 really should be fixed. 1011 1012 amd64 1013 ~~~~~ 1014 Analogous to x86. 
   ppc32
   ~~~~~
   On entry, guest state pointer is r31.  .dispatch must be NULL.
   Control is returned with a branch to the link register.  Generated
   code will not change lr.  At return, r3 holds the next guest addr
   (or r3:r4 ?).  r31 may be changed to a VEX_TRC_ value,
   otherwise it should be as it was at entry.

   ppc64
   ~~~~~
   Same as ppc32.

   arm32
   ~~~~~
   r8 is GSP.

   arm64
   ~~~~~
   r21 is GSP.

   ALL GUEST ARCHITECTURES
   ~~~~~~~~~~~~~~~~~~~~~~~
   The guest state must contain two pseudo-registers, guest_CMSTART
   and guest_CMLEN.  These are used to specify guest address ranges,
   either of code to be invalidated, when used in conjunction with
   Ijk_InvalICache, or of d-cache ranges to be flushed, when used in
   conjunction with Ijk_FlushDCache.  In such cases, the two _CM
   pseudo-regs should be filled in by the IR, and then an exit with
   one of the two abovementioned Ijk_ kinds should happen, so that the
   dispatcher can action them.  Both pseudo-regs must have size equal
   to the guest word size.

   The architecture must provide a third pseudo-register, guest_NRADDR,
   also guest-word-sized.  This is used to record the unredirected guest
   address at the start of a translation whose start has been
   redirected.  By reading this pseudo-register shortly afterwards,
   the translation can find out what the corresponding no-redirection
   address was.  Note, this is only set for wrap-style redirects, not
   for replace-style ones.
*/
#endif /* ndef __LIBVEX_H */

/*---------------------------------------------------------------*/
/*--- libvex.h                                                ---*/
/*---------------------------------------------------------------*/
| [ Source navigation ] | [ Diff markup ] | [ Identifier search ] | [ general search ] |
|
This page was automatically generated by the 2.3.7 LXR engine. The LXR team |
|