|
||||
File indexing completed on 2025-01-18 10:13:29
0001 /* 0002 ---------------------------------------------------------------- 0003 0004 Notice that the above BSD-style license applies to this one file 0005 (helgrind.h) only. The entire rest of Valgrind is licensed under 0006 the terms of the GNU General Public License, version 2. See the 0007 COPYING file in the source distribution for details. 0008 0009 ---------------------------------------------------------------- 0010 0011 This file is part of Helgrind, a Valgrind tool for detecting errors 0012 in threaded programs. 0013 0014 Copyright (C) 2007-2017 OpenWorks LLP 0015 info@open-works.co.uk 0016 0017 Redistribution and use in source and binary forms, with or without 0018 modification, are permitted provided that the following conditions 0019 are met: 0020 0021 1. Redistributions of source code must retain the above copyright 0022 notice, this list of conditions and the following disclaimer. 0023 0024 2. The origin of this software must not be misrepresented; you must 0025 not claim that you wrote the original software. If you use this 0026 software in a product, an acknowledgment in the product 0027 documentation would be appreciated but is not required. 0028 0029 3. Altered source versions must be plainly marked as such, and must 0030 not be misrepresented as being the original software. 0031 0032 4. The name of the author may not be used to endorse or promote 0033 products derived from this software without specific prior written 0034 permission. 0035 0036 THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS 0037 OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 0038 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 0039 ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY 0040 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 0041 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE 0042 GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 0043 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 0044 WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 0045 NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 0046 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 0047 0048 ---------------------------------------------------------------- 0049 0050 Notice that the above BSD-style license applies to this one file 0051 (helgrind.h) only. The entire rest of Valgrind is licensed under 0052 the terms of the GNU General Public License, version 2. See the 0053 COPYING file in the source distribution for details. 0054 0055 ---------------------------------------------------------------- 0056 */ 0057 0058 #ifndef __HELGRIND_H 0059 #define __HELGRIND_H 0060 0061 #include "valgrind.h" 0062 0063 /* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !! 0064 This enum comprises an ABI exported by Valgrind to programs 0065 which use client requests. DO NOT CHANGE THE ORDER OF THESE 0066 ENTRIES, NOR DELETE ANY -- add new ones at the end. */ 0067 typedef 0068 enum { 0069 VG_USERREQ__HG_CLEAN_MEMORY = VG_USERREQ_TOOL_BASE('H','G'), 0070 0071 /* The rest are for Helgrind's internal use. Not for end-user 0072 use. Do not use them unless you are a Valgrind developer. */ 0073 0074 /* Notify the tool what this thread's pthread_t is. 
*/ 0075 _VG_USERREQ__HG_SET_MY_PTHREAD_T = VG_USERREQ_TOOL_BASE('H','G') 0076 + 256, 0077 _VG_USERREQ__HG_PTH_API_ERROR, /* char*, int */ 0078 _VG_USERREQ__HG_PTHREAD_JOIN_POST, /* pthread_t of quitter */ 0079 _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST, /* pth_mx_t*, long mbRec */ 0080 _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE, /* pth_mx_t*, long isInit */ 0081 _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE, /* pth_mx_t* */ 0082 _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST, /* pth_mx_t* */ 0083 _VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_PRE, /* void*, long isTryLock */ 0084 _VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_POST, /* void* */ 0085 _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE, /* pth_cond_t* */ 0086 _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE, /* pth_cond_t* */ 0087 _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE, /* pth_cond_t*, pth_mx_t* */ 0088 _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST, /* pth_cond_t*, pth_mx_t* */ 0089 _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE, /* pth_cond_t*, long isInit */ 0090 _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST, /* pth_rwlk_t* */ 0091 _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE, /* pth_rwlk_t* */ 0092 _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE, /* pth_rwlk_t*, long isW */ 0093 _VG_USERREQ__HG_PTHREAD_RWLOCK_ACQUIRED, /* void*, long isW */ 0094 _VG_USERREQ__HG_PTHREAD_RWLOCK_RELEASED, /* void* */ 0095 _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST, /* pth_rwlk_t* */ 0096 _VG_USERREQ__HG_POSIX_SEM_INIT_POST, /* sem_t*, ulong value */ 0097 _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE, /* sem_t* */ 0098 _VG_USERREQ__HG_POSIX_SEM_RELEASED, /* void* */ 0099 _VG_USERREQ__HG_POSIX_SEM_ACQUIRED, /* void* */ 0100 _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE, /* pth_bar_t*, ulong, ulong */ 0101 _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE, /* pth_bar_t* */ 0102 _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE, /* pth_bar_t* */ 0103 _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE, /* pth_slk_t* */ 0104 _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST, /* pth_slk_t* */ 0105 
_VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE, /* pth_slk_t* */ 0106 _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST, /* pth_slk_t* */ 0107 _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE, /* pth_slk_t* */ 0108 _VG_USERREQ__HG_CLIENTREQ_UNIMP, /* char* */ 0109 _VG_USERREQ__HG_USERSO_SEND_PRE, /* arbitrary UWord SO-tag */ 0110 _VG_USERREQ__HG_USERSO_RECV_POST, /* arbitrary UWord SO-tag */ 0111 _VG_USERREQ__HG_USERSO_FORGET_ALL, /* arbitrary UWord SO-tag */ 0112 _VG_USERREQ__HG_RESERVED2, /* Do not use */ 0113 _VG_USERREQ__HG_RESERVED3, /* Do not use */ 0114 _VG_USERREQ__HG_RESERVED4, /* Do not use */ 0115 _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED, /* Addr a, ulong len */ 0116 _VG_USERREQ__HG_ARANGE_MAKE_TRACKED, /* Addr a, ulong len */ 0117 _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE, /* pth_bar_t*, ulong */ 0118 _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK, /* Addr start_of_block */ 0119 _VG_USERREQ__HG_PTHREAD_COND_INIT_POST, /* pth_cond_t*, pth_cond_attr_t*/ 0120 _VG_USERREQ__HG_GNAT_MASTER_HOOK, /* void*d,void*m,Word ml */ 0121 _VG_USERREQ__HG_GNAT_MASTER_COMPLETED_HOOK, /* void*s,Word ml */ 0122 _VG_USERREQ__HG_GET_ABITS, /* Addr a,Addr abits, ulong len */ 0123 _VG_USERREQ__HG_PTHREAD_CREATE_BEGIN, 0124 _VG_USERREQ__HG_PTHREAD_CREATE_END, 0125 _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE, /* pth_mx_t*,long isTryLock */ 0126 _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST, /* pth_mx_t *,long tookLock */ 0127 _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST, /* pth_rwlk_t*,long isW,long */ 0128 _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE, /* pth_rwlk_t* */ 0129 _VG_USERREQ__HG_POSIX_SEM_POST_PRE, /* sem_t* */ 0130 _VG_USERREQ__HG_POSIX_SEM_POST_POST, /* sem_t* */ 0131 _VG_USERREQ__HG_POSIX_SEM_WAIT_PRE, /* sem_t* */ 0132 _VG_USERREQ__HG_POSIX_SEM_WAIT_POST, /* sem_t*, long tookLock */ 0133 _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_POST, /* pth_cond_t* */ 0134 _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_POST,/* pth_cond_t* */ 0135 _VG_USERREQ__HG_RTLD_BIND_GUARD, /* int flags */ 0136 _VG_USERREQ__HG_RTLD_BIND_CLEAR, /* int flags */ 
0137 _VG_USERREQ__HG_GNAT_DEPENDENT_MASTER_JOIN /* void*d, void*m */ 0138 } Vg_TCheckClientRequest; 0139 0140 0141 /*----------------------------------------------------------------*/ 0142 /*--- ---*/ 0143 /*--- Implementation-only facilities. Not for end-user use. ---*/ 0144 /*--- For end-user facilities see below (the next section in ---*/ 0145 /*--- this file.) ---*/ 0146 /*--- ---*/ 0147 /*----------------------------------------------------------------*/ 0148 0149 /* Do a client request. These are macros rather than a functions so 0150 as to avoid having an extra frame in stack traces. 0151 0152 NB: these duplicate definitions in hg_intercepts.c. But here, we 0153 have to make do with weaker typing (no definition of Word etc) and 0154 no assertions, whereas in helgrind.h we can use those facilities. 0155 Obviously it's important the two sets of definitions are kept in 0156 sync. 0157 0158 The commented-out asserts should actually hold, but unfortunately 0159 they can't be allowed to be visible here, because that would 0160 require the end-user code to #include <assert.h>. 
/* Fire a client request with one word argument; no result. */
#define DO_CREQ_v_W(_creqF, _ty1F,_arg1F)                \
   do {                                                  \
      long int _arg1;                                    \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      VALGRIND_DO_CLIENT_REQUEST_STMT(                   \
                            (_creqF),                    \
                            _arg1, 0,0,0,0);             \
   } while (0)

/* Fire a client request with one word argument, producing a result
   in _resF; _dfltF is the value delivered when not running under
   Valgrind.  BUGFIX: _qzz_res was previously assigned without being
   declared, so every expansion of this macro either failed to
   compile or captured an unrelated outer _qzz_res.  Declare it
   locally, exactly as DO_CREQ_W_WWW does. */
#define DO_CREQ_W_W(_resF, _dfltF, _creqF, _ty1F,_arg1F) \
   do {                                                  \
      long int _qzz_res;                                 \
      long int _arg1;                                    \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(        \
                            (_dfltF),                    \
                            (_creqF),                    \
                            _arg1, 0,0,0,0);             \
      _resF = _qzz_res;                                  \
   } while (0)

/* Fire a client request with two word arguments; no result. */
#define DO_CREQ_v_WW(_creqF, _ty1F,_arg1F, _ty2F,_arg2F) \
   do {                                                  \
      long int _arg1, _arg2;                             \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      /* assert(sizeof(_ty2F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      _arg2 = (long int)(_arg2F);                        \
      VALGRIND_DO_CLIENT_REQUEST_STMT(                   \
                            (_creqF),                    \
                            _arg1,_arg2,0,0,0);          \
   } while (0)

/* Fire a client request with three word arguments; no result. */
#define DO_CREQ_v_WWW(_creqF, _ty1F,_arg1F,              \
                      _ty2F,_arg2F, _ty3F, _arg3F)       \
   do {                                                  \
      long int _arg1, _arg2, _arg3;                      \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      /* assert(sizeof(_ty2F) == sizeof(long int)); */   \
      /* assert(sizeof(_ty3F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      _arg2 = (long int)(_arg2F);                        \
      _arg3 = (long int)(_arg3F);                        \
      VALGRIND_DO_CLIENT_REQUEST_STMT(                   \
                            (_creqF),                    \
                            _arg1,_arg2,_arg3,0,0);      \
   } while (0)

/* Fire a client request with three word arguments, producing a
   result in _resF; _dfltF is delivered when not on Valgrind. */
#define DO_CREQ_W_WWW(_resF, _dfltF, _creqF, _ty1F,_arg1F, \
                      _ty2F,_arg2F, _ty3F, _arg3F)       \
   do {                                                  \
      long int _qzz_res;                                 \
      long int _arg1, _arg2, _arg3;                      \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      _arg2 = (long int)(_arg2F);                        \
      _arg3 = (long int)(_arg3F);                        \
      _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(        \
                            (_dfltF),                    \
                            (_creqF),                    \
                            _arg1,_arg2,_arg3,0,0);      \
      _resF = _qzz_res;                                  \
   } while (0)


/* Report an unimplemented annotation; _qzz_str names it. */
#define _HG_CLIENTREQ_UNIMP(_qzz_str)                    \
   DO_CREQ_v_W(_VG_USERREQ__HG_CLIENTREQ_UNIMP,          \
               (char*),(_qzz_str))


/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*--- Helgrind-native requests.  These allow access to         ---*/
/*--- the same set of annotation primitives that are used      ---*/
/*--- to build the POSIX pthread wrappers.                     ---*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* ----------------------------------------------------------
   For describing ordinary mutexes (non-rwlocks).  For rwlock
   descriptions see ANNOTATE_RWLOCK_* below.
   ---------------------------------------------------------- */

/* Notify here immediately after mutex creation.  _mbRec == 0 for a
   non-recursive mutex, 1 for a recursive mutex. */
#define VALGRIND_HG_MUTEX_INIT_POST(_mutex, _mbRec)          \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST,     \
                void*,(_mutex), long,(_mbRec))

/* Notify here immediately before mutex acquisition.  _isTryLock == 0
   for a normal acquisition, 1 for a "try" style acquisition. */
#define VALGRIND_HG_MUTEX_LOCK_PRE(_mutex, _isTryLock)       \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_PRE,   \
                void*,(_mutex), long,(_isTryLock))

/* Notify here immediately after a successful mutex acquisition. */
#define VALGRIND_HG_MUTEX_LOCK_POST(_mutex)                  \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_POST,   \
               void*,(_mutex))

/* Notify here immediately before a mutex release. */
#define VALGRIND_HG_MUTEX_UNLOCK_PRE(_mutex)                 \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE,     \
               void*,(_mutex))
/* Notify here immediately after a mutex release. */
#define VALGRIND_HG_MUTEX_UNLOCK_POST(_mutex)                \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST,    \
               void*,(_mutex))

/* Notify here immediately before mutex destruction. */
#define VALGRIND_HG_MUTEX_DESTROY_PRE(_mutex)                \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE,    \
               void*,(_mutex))

/* ----------------------------------------------------------
   For describing semaphores.
   ---------------------------------------------------------- */

/* Notify here immediately after semaphore creation. */
#define VALGRIND_HG_SEM_INIT_POST(_sem, _value)              \
   DO_CREQ_v_WW(_VG_USERREQ__HG_POSIX_SEM_INIT_POST,         \
                void*, (_sem), unsigned long, (_value))

/* Notify here immediately after a semaphore wait (an acquire-style
   operation). */
#define VALGRIND_HG_SEM_WAIT_POST(_sem)                      \
   DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_ACQUIRED,           \
               void*,(_sem))

/* Notify here immediately before a semaphore post (a release-style
   operation). */
#define VALGRIND_HG_SEM_POST_PRE(_sem)                       \
   DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_RELEASED,           \
               void*,(_sem))

/* Notify here immediately before semaphore destruction. */
#define VALGRIND_HG_SEM_DESTROY_PRE(_sem)                    \
   DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE,        \
               void*, (_sem))

/* ----------------------------------------------------------
   For describing barriers.
   ---------------------------------------------------------- */

/* Notify here immediately before barrier creation.  _count is the
   capacity.  _resizable == 0 means the barrier may not be resized, 1
   means it may be. */
#define VALGRIND_HG_BARRIER_INIT_PRE(_bar, _count, _resizable) \
   DO_CREQ_v_WWW(_VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE,   \
                 void*,(_bar),                               \
                 unsigned long,(_count),                     \
                 unsigned long,(_resizable))

/* Notify here immediately before arrival at a barrier. */
#define VALGRIND_HG_BARRIER_WAIT_PRE(_bar)                   \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE,     \
               void*,(_bar))

/* Notify here immediately before a resize (change of barrier
   capacity).  If _newcount >= the existing capacity, then there is
   no change in the state of any threads waiting at the barrier.  If
   _newcount < the existing capacity, and >= _newcount threads are
   currently waiting at the barrier, then this notification is
   considered to also have the effect of telling the checker that all
   waiting threads have now moved past the barrier.  (I can't think
   of any other sane semantics.) */
#define VALGRIND_HG_BARRIER_RESIZE_PRE(_bar, _newcount)      \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE,  \
                void*,(_bar),                                \
                unsigned long,(_newcount))

/* Notify here immediately before barrier destruction. */
#define VALGRIND_HG_BARRIER_DESTROY_PRE(_bar)                \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE,  \
               void*,(_bar))

/* ----------------------------------------------------------
   For describing memory ownership changes.
   ---------------------------------------------------------- */

/* Clean memory state.  This makes Helgrind forget everything it knew
   about the specified memory range.  Effectively this announces that
   the specified memory range now "belongs" to the calling thread, so
   that: (1) the calling thread can access it safely without
   synchronisation, and (2) all other threads must sync with this one
   to access it safely.  This is particularly useful for memory
   allocators that wish to recycle memory. */
#define VALGRIND_HG_CLEAN_MEMORY(_qzz_start, _qzz_len)       \
   DO_CREQ_v_WW(VG_USERREQ__HG_CLEAN_MEMORY,                 \
                void*,(_qzz_start),                          \
                unsigned long,(_qzz_len))

/* The same, but for the heap block starting at _qzz_blockstart.
   This allows painting when we only know the address of an object,
   but not its size, which is sometimes the case in C++ code
   involving inheritance, and in which RTTI is not, for whatever
   reason, available.  Returns the number of bytes painted, which can
   be zero for a zero-sized block.  Hence, return values >= 0
   indicate success (the block was found), the value -1 indicates
   block not found, and -2 is returned when not running on
   Helgrind. */
#define VALGRIND_HG_CLEAN_MEMORY_HEAPBLOCK(_qzz_blockstart)  \
   (__extension__                                            \
   ({long int _npainted;                                     \
     DO_CREQ_W_W(_npainted, (-2)/*default*/,                 \
                 _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK,     \
                 void*,(_qzz_blockstart));                   \
     _npainted;                                              \
   }))

/* ----------------------------------------------------------
   For error control.
   ---------------------------------------------------------- */

/* Tell H that an address range is not to be "tracked" until further
   notice.  This puts it in the NOACCESS state, in which case we
   ignore all reads and writes to it.  Useful for ignoring ranges of
   memory where there might be races we don't want to see.  If the
   memory is subsequently reallocated via malloc/new/stack
   allocation, then it is put back in the trackable state.  Hence it
   is safe in the situation where checking is disabled, the
   containing area is deallocated and later reallocated for some
   other purpose. */
#define VALGRIND_HG_DISABLE_CHECKING(_qzz_start, _qzz_len)   \
   DO_CREQ_v_WW(_VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED,       \
                void*,(_qzz_start),                          \
                unsigned long,(_qzz_len))

/* And put it back into the normal "tracked" state, that is, make it
   once again subject to the normal race-checking machinery.  This
   puts it in the same state as new memory allocated by this thread
   -- that is, basically owned exclusively by this thread. */
#define VALGRIND_HG_ENABLE_CHECKING(_qzz_start, _qzz_len)    \
   DO_CREQ_v_WW(_VG_USERREQ__HG_ARANGE_MAKE_TRACKED,         \
                void*,(_qzz_start),                          \
                unsigned long,(_qzz_len))


/* Checks the accessibility bits for addresses [zza..zza+zznbytes-1].
   If the zzabits array is provided, the accessibility bits are
   copied into it.
   Return values:
     -2 if not running on helgrind
     -1 if any part of zzabits is not addressable
     >= 0 : success.
   On success it returns the number of addressable bytes found, so to
   check that a whole range is addressable, check
      VALGRIND_HG_GET_ABITS(addr,NULL,len) == len
   In addition, if you want to examine the addressability of each
   byte of the range, provide a non-NULL second argument pointing to
   an array of unsigned char of length len.
   Addressable bytes are indicated with 0xff;
   non-addressable bytes with 0x00. */
#define VALGRIND_HG_GET_ABITS(zza,zzabits,zznbytes)          \
   (__extension__                                            \
   ({long int _res;                                          \
     DO_CREQ_W_WWW(_res, (-2)/*default*/,                    \
                   _VG_USERREQ__HG_GET_ABITS,                \
                   void*,(zza), void*,(zzabits),             \
                   unsigned long,(zznbytes));                \
     _res;                                                   \
   }))
/* End-user request for Ada applications compiled with GNAT.
   Helgrind understands the Ada concept of Ada task dependencies and
   terminations.  See Ada Reference Manual section 9.3 "Task
   Dependence - Termination of Tasks".
   However, in some cases, the master of (terminated) tasks completes
   only when the application exits.  An example of this is
   dynamically allocated tasks with an access type defined at Library
   Level.  By default, the state of such tasks in Helgrind will be
   'exited but join not done yet'.  Many tasks in such a state are
   however causing Helgrind CPU and memory to increase significantly.
   VALGRIND_HG_GNAT_DEPENDENT_MASTER_JOIN can be used to indicate to
   Helgrind that a not yet completed master has however already
   'seen' the termination of a dependent: this is conceptually the
   same as a pthread_join and causes the cleanup of the dependent as
   done by Helgrind when a master completes.
   This allows to avoid the overhead in helgrind caused by such
   tasks.  A typical usage for a master to indicate it has done
   conceptually a join with a dependent task before the master
   completes is:
      while not Dep_Task'Terminated loop
         ... do whatever to wait for Dep_Task termination.
      end loop;
      VALGRIND_HG_GNAT_DEPENDENT_MASTER_JOIN
        (Dep_Task'Identity,
         Ada.Task_Identification.Current_Task);
   Note that VALGRIND_HG_GNAT_DEPENDENT_MASTER_JOIN should be a
   binding to a C function built with the below macro. */
#define VALGRIND_HG_GNAT_DEPENDENT_MASTER_JOIN(_qzz_dep, _qzz_master) \
   DO_CREQ_v_WW(_VG_USERREQ__HG_GNAT_DEPENDENT_MASTER_JOIN,  \
                void*,(_qzz_dep),                            \
                void*,(_qzz_master))

/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*--- ThreadSanitizer-compatible requests                      ---*/
/*--- (mostly unimplemented)                                   ---*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* A quite-broad set of annotations, as used in the ThreadSanitizer
   project.  This implementation aims to be a (source-level)
   compatible implementation of the macros defined in:

   http://code.google.com/p/data-race-test/source
   /browse/trunk/dynamic_annotations/dynamic_annotations.h

   (some of the comments below are taken from the above file)

   The implementation here is very incomplete, and intended as a
   starting point.  Many of the macros are unimplemented.  Rather
   than allowing unimplemented macros to silently do nothing, they
   cause an assertion.  Intention is to implement them on demand.

   The major use of these macros is to make visible to race
   detectors, the behaviour (effects) of user-implemented
   synchronisation primitives, that the detectors could not otherwise
   deduce from the normal observation of pthread etc calls.

   Some of the macros are no-ops in Helgrind.  That's because
   Helgrind is a pure happens-before detector, whereas ThreadSanitizer
   uses a hybrid lockset and happens-before scheme, which requires
   more accurate annotations for correct operation.

   The macros are listed in the same order as in
   dynamic_annotations.h (URL just above).

   I should point out that I am less than clear about the intended
   semantics of quite a number of them.  Comments and clarifications
   welcomed!
*/

/* ----------------------------------------------------------------
   These four allow description of user-level condition variables,
   apparently in the style of POSIX's pthread_cond_t.  Currently
   unimplemented and will assert.
   ----------------------------------------------------------------
*/
/* Report that wait on the condition variable at address CV has
   succeeded and the lock at address LOCK is now held.  CV and LOCK
   are completely arbitrary memory addresses which presumably mean
   something to the application, but are meaningless to Helgrind. */
#define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock)                 \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_LOCK_WAIT")

/* Report that wait on the condition variable at CV has succeeded.
   Variant w/o lock. */
#define ANNOTATE_CONDVAR_WAIT(cv)                            \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_WAIT")

/* Report that we are about to signal on the condition variable at
   address CV. */
#define ANNOTATE_CONDVAR_SIGNAL(cv)                          \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_SIGNAL")

/* Report that we are about to signal_all on the condition variable
   at CV. */
#define ANNOTATE_CONDVAR_SIGNAL_ALL(cv)                      \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_SIGNAL_ALL")


/* ----------------------------------------------------------------
   Create completely arbitrary happens-before edges between threads.

   If threads T1 .. Tn all do ANNOTATE_HAPPENS_BEFORE(obj) and later
   (w.r.t. some notional global clock for the computation) thread Tm
   does ANNOTATE_HAPPENS_AFTER(obj), then Helgrind will regard all
   memory accesses done by T1 .. Tn before the ..BEFORE.. call as
   happening-before all memory accesses done by Tm after the
   ..AFTER.. call.  Hence Helgrind won't complain about races if Tm's
   accesses afterwards are to the same locations as accesses before
   by any of T1 .. Tn.

   OBJ is a machine word (unsigned long, or void*), is completely
   arbitrary, and denotes the identity of some synchronisation object
   you're modelling.

   You must do the _BEFORE call just before the real sync event on
   the signaller's side, and _AFTER just after the real sync event on
   the waiter's side.

   If none of the rest of these macros make sense to you, at least
   take the time to understand these two.  They form the very essence
   of describing arbitrary inter-thread synchronisation events to
   Helgrind.  You can get a long way just with them alone.

   See also, extensive discussion on semantics of this in
   https://bugs.kde.org/show_bug.cgi?id=243935

   ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj) is interim until such time
   as bug 243935 is fully resolved.  It instructs Helgrind to forget
   about any ANNOTATE_HAPPENS_BEFORE calls on the specified object,
   in effect putting it back in its original state.  Once in that
   state, a use of ANNOTATE_HAPPENS_AFTER on it has no effect on the
   calling thread.

   An implementation may optionally release resources it has
   associated with 'obj' when ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj)
   happens.  Users are recommended to use
   ANNOTATE_HAPPENS_BEFORE_FORGET_ALL to indicate when a
   synchronisation object is no longer needed, so as to avoid
   potential indefinite resource leaks.
   ----------------------------------------------------------------
*/
#define ANNOTATE_HAPPENS_BEFORE(obj)                         \
   DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_SEND_PRE, void*,(obj))

#define ANNOTATE_HAPPENS_AFTER(obj)                          \
   DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_RECV_POST, void*,(obj))

#define ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj)              \
   DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_FORGET_ALL, void*,(obj))

/* ----------------------------------------------------------------
   Memory publishing.  The TSan sources say:

     Report that the bytes in the range [pointer, pointer+size) are
     about to be published safely.  The race checker will create a
     happens-before arc from the call
     ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) to subsequent
     accesses to this memory.

   I'm not sure I understand what this means exactly, nor whether it
   is relevant for a pure h-b detector.  Leaving unimplemented for
   now.
   ----------------------------------------------------------------
*/
#define ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size)         \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PUBLISH_MEMORY_RANGE")

/* DEPRECATED. Don't use it. */
/* #define ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size) */
/* DEPRECATED. Don't use it. */
/* #define ANNOTATE_SWAP_MEMORY_RANGE(pointer, size) */


/* ----------------------------------------------------------------
   TSan sources say:

     Instruct the tool to create a happens-before arc between
     MU->Unlock() and MU->Lock().  This annotation may slow down the
     race detector; normally it is used only when it would be
     difficult to annotate each of the mutex's critical sections
     individually using the annotations above.

   If MU is a posix pthread_mutex_t then Helgrind will do this
   anyway.  In any case, leave as unimp for now.  I'm unsure about
   the intended behaviour.
   ----------------------------------------------------------------
*/
#define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu)               \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX")

/* Deprecated.  Use ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX. */
/* #define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) */


/* ----------------------------------------------------------------
   TSan sources say:

     Annotations useful when defining memory allocators, or when
     memory that was protected in one way starts to be protected in
     another.

     Report that a new memory at "address" of size "size" has been
     allocated.  This might be used when the memory has been
     retrieved from a free list and is about to be reused, or when
     the locking discipline for a variable changes.

   AFAICS this is the same as VALGRIND_HG_CLEAN_MEMORY.
   ----------------------------------------------------------------
*/
#define ANNOTATE_NEW_MEMORY(address, size)                   \
   VALGRIND_HG_CLEAN_MEMORY((address), (size))


/* ----------------------------------------------------------------
   TSan sources say:

     Annotations useful when defining FIFO queues that transfer data
     between threads.

   All unimplemented.  Am not claiming to understand this (yet).
   ----------------------------------------------------------------
*/

/* Report that the producer-consumer queue object at address PCQ has
   been created.  The ANNOTATE_PCQ_* annotations should be used only
   for FIFO queues.  For non-FIFO queues use ANNOTATE_HAPPENS_BEFORE
   (for put) and ANNOTATE_HAPPENS_AFTER (for get). */
#define ANNOTATE_PCQ_CREATE(pcq)                             \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_CREATE")

/* Report that the queue at address PCQ is about to be destroyed. */
#define ANNOTATE_PCQ_DESTROY(pcq)                            \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_DESTROY")

/* Report that we are about to put an element into a FIFO queue at
   address PCQ. */
#define ANNOTATE_PCQ_PUT(pcq)                                \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_PUT")

/* Report that we've just got an element from a FIFO queue at address
   PCQ. */
#define ANNOTATE_PCQ_GET(pcq)                                \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_GET")


/* ----------------------------------------------------------------
   Annotations that suppress errors.  It is usually better to express
   the program's synchronization using the other annotations, but
   these can be used when all else fails.

   Currently these are all unimplemented.  I can't think of a simple
   way to implement them without at least some performance overhead.
   ----------------------------------------------------------------
*/

/* Report that we may have a benign race at "pointer", with size
   "sizeof(*(pointer))".  "pointer" must be a non-void* pointer.
   Insert at the point where "pointer" has been allocated, preferably
   close to the point where the race happens.  See also
   ANNOTATE_BENIGN_RACE_STATIC.

   XXX: what's this actually supposed to do?  And what's the type of
   DESCRIPTION?  When does the annotation stop having an effect? */
#define ANNOTATE_BENIGN_RACE(pointer, description)           \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BENIGN_RACE")

/* Same as ANNOTATE_BENIGN_RACE(address, description), but applies to
   the memory range [address, address+size). */
#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
   VALGRIND_HG_DISABLE_CHECKING(address, size)

/* Request the analysis tool to ignore all reads in the current
   thread until ANNOTATE_IGNORE_READS_END is called.  Useful to
   ignore intentional racey reads, while still checking other reads
   and all writes. */
#define ANNOTATE_IGNORE_READS_BEGIN()                        \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_READS_BEGIN")

/* Stop ignoring reads. */
#define ANNOTATE_IGNORE_READS_END()                          \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_READS_END")

/* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes. */
#define ANNOTATE_IGNORE_WRITES_BEGIN()                       \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_WRITES_BEGIN")

/* Stop ignoring writes. */
#define ANNOTATE_IGNORE_WRITES_END()                         \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_WRITES_END")

/* Start ignoring all memory accesses (reads and writes). */
#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN()             \
   do {                                                      \
      ANNOTATE_IGNORE_READS_BEGIN();                         \
      ANNOTATE_IGNORE_WRITES_BEGIN();                        \
   } while (0)

/* Stop ignoring all memory accesses. */
#define ANNOTATE_IGNORE_READS_AND_WRITES_END()               \
   do {                                                      \
      ANNOTATE_IGNORE_WRITES_END();                          \
      ANNOTATE_IGNORE_READS_END();                           \
   } while (0)


/* ----------------------------------------------------------------
   Annotations useful for debugging.

   Again, so far unimplemented, partly for performance reasons.
   ----------------------------------------------------------------
*/
*/ 0742 #define ANNOTATE_TRACE_MEMORY(address) \ 0743 _HG_CLIENTREQ_UNIMP("ANNOTATE_TRACE_MEMORY") 0744 0745 /* Report the current thread name to a race detector. */ 0746 #define ANNOTATE_THREAD_NAME(name) \ 0747 _HG_CLIENTREQ_UNIMP("ANNOTATE_THREAD_NAME") 0748 0749 0750 /* ---------------------------------------------------------------- 0751 Annotations for describing behaviour of user-implemented lock 0752 primitives. In all cases, the LOCK argument is a completely 0753 arbitrary machine word (unsigned long, or void*) and can be any 0754 value which gives a unique identity to the lock objects being 0755 modelled. 0756 0757 We just pretend they're ordinary posix rwlocks. That'll probably 0758 give some rather confusing wording in error messages, claiming that 0759 the arbitrary LOCK values are pthread_rwlock_t*'s, when in fact 0760 they are not. Ah well. 0761 ---------------------------------------------------------------- 0762 */ 0763 /* Report that a lock has just been created at address LOCK. */ 0764 #define ANNOTATE_RWLOCK_CREATE(lock) \ 0765 DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST, \ 0766 void*,(lock)) 0767 0768 /* Report that the lock at address LOCK is about to be destroyed. */ 0769 #define ANNOTATE_RWLOCK_DESTROY(lock) \ 0770 DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE, \ 0771 void*,(lock)) 0772 0773 /* Report that the lock at address LOCK has just been acquired. 0774 is_w=1 for writer lock, is_w=0 for reader lock. */ 0775 #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \ 0776 DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_RWLOCK_ACQUIRED, \ 0777 void*,(lock), unsigned long,(is_w)) 0778 0779 /* Report that the lock at address LOCK is about to be released. */ 0780 #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \ 0781 DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_RELEASED, \ 0782 void*,(lock)) /* is_w is ignored */ 0783 0784 0785 /* ------------------------------------------------------------- 0786 Annotations useful when implementing barriers. 
They are not 0787 normally needed by modules that merely use barriers. 0788 The "barrier" argument is a pointer to the barrier object. 0789 ---------------------------------------------------------------- 0790 */ 0791 0792 /* Report that the "barrier" has been initialized with initial 0793 "count". If 'reinitialization_allowed' is true, initialization is 0794 allowed to happen multiple times w/o calling barrier_destroy() */ 0795 #define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) \ 0796 _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_INIT") 0797 0798 /* Report that we are about to enter barrier_wait("barrier"). */ 0799 #define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) \ 0800 _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_DESTROY") 0801 0802 /* Report that we just exited barrier_wait("barrier"). */ 0803 #define ANNOTATE_BARRIER_WAIT_AFTER(barrier) \ 0804 _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_DESTROY") 0805 0806 /* Report that the "barrier" has been destroyed. */ 0807 #define ANNOTATE_BARRIER_DESTROY(barrier) \ 0808 _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_DESTROY") 0809 0810 0811 /* ---------------------------------------------------------------- 0812 Annotations useful for testing race detectors. 0813 ---------------------------------------------------------------- 0814 */ 0815 0816 /* Report that we expect a race on the variable at ADDRESS. Use only 0817 in unit tests for a race detector. */ 0818 #define ANNOTATE_EXPECT_RACE(address, description) \ 0819 _HG_CLIENTREQ_UNIMP("ANNOTATE_EXPECT_RACE") 0820 0821 /* A no-op. Insert where you like to test the interceptors. */ 0822 #define ANNOTATE_NO_OP(arg) \ 0823 _HG_CLIENTREQ_UNIMP("ANNOTATE_NO_OP") 0824 0825 /* Force the race detector to flush its state. The actual effect depends on 0826 * the implementation of the detector. */ 0827 #define ANNOTATE_FLUSH_STATE() \ 0828 _HG_CLIENTREQ_UNIMP("ANNOTATE_FLUSH_STATE") 0829 0830 #endif /* __HELGRIND_H */
[ Source navigation ] | [ Diff markup ] | [ Identifier search ] | [ general search ] |
This page was automatically generated by the 2.3.7 LXR engine. The LXR team |