/* helgrind.h */
  1. /*
  2. ----------------------------------------------------------------
  3. Notice that the above BSD-style license applies to this one file
  4. (helgrind.h) only. The entire rest of Valgrind is licensed under
  5. the terms of the GNU General Public License, version 2. See the
  6. COPYING file in the source distribution for details.
  7. ----------------------------------------------------------------
  8. This file is part of Helgrind, a Valgrind tool for detecting errors
  9. in threaded programs.
  10. Copyright (C) 2007-2017 OpenWorks LLP
  11. info@open-works.co.uk
  12. Redistribution and use in source and binary forms, with or without
  13. modification, are permitted provided that the following conditions
  14. are met:
  15. 1. Redistributions of source code must retain the above copyright
  16. notice, this list of conditions and the following disclaimer.
  17. 2. The origin of this software must not be misrepresented; you must
  18. not claim that you wrote the original software. If you use this
  19. software in a product, an acknowledgment in the product
  20. documentation would be appreciated but is not required.
  21. 3. Altered source versions must be plainly marked as such, and must
  22. not be misrepresented as being the original software.
  23. 4. The name of the author may not be used to endorse or promote
  24. products derived from this software without specific prior written
  25. permission.
  26. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
  27. OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  28. WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  29. ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
  30. DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  31. DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
  32. GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  33. INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  34. WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
  35. NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  36. SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  37. ----------------------------------------------------------------
  38. Notice that the above BSD-style license applies to this one file
  39. (helgrind.h) only. The entire rest of Valgrind is licensed under
  40. the terms of the GNU General Public License, version 2. See the
  41. COPYING file in the source distribution for details.
  42. ----------------------------------------------------------------
  43. */
  44. #ifndef __HELGRIND_H
  45. #define __HELGRIND_H
  46. #include "valgrind.h"
  47. /* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
  48. This enum comprises an ABI exported by Valgrind to programs
  49. which use client requests. DO NOT CHANGE THE ORDER OF THESE
  50. ENTRIES, NOR DELETE ANY -- add new ones at the end. */
typedef
   enum {
      /* Public request: make Helgrind forget everything it knows
         about the given address range.  See VALGRIND_HG_CLEAN_MEMORY
         below for the end-user wrapper. */
      VG_USERREQ__HG_CLEAN_MEMORY = VG_USERREQ_TOOL_BASE('H','G'),

      /* The rest are for Helgrind's internal use.  Not for end-user
         use.  Do not use them unless you are a Valgrind developer. */

      /* Notify the tool what this thread's pthread_t is. */
      _VG_USERREQ__HG_SET_MY_PTHREAD_T = VG_USERREQ_TOOL_BASE('H','G')
                                         + 256,
      _VG_USERREQ__HG_PTH_API_ERROR,              /* char*, int */
      _VG_USERREQ__HG_PTHREAD_JOIN_POST,          /* pthread_t of quitter */
      _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST,    /* pth_mx_t*, long mbRec */
      _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE,  /* pth_mx_t*, long isInit */
      _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE,   /* pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST,  /* pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_PRE,  /* void*, long isTryLock */
      _VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_POST, /* void* */
      _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE,    /* pth_cond_t* */
      _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE, /* pth_cond_t* */
      _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE,      /* pth_cond_t*, pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST,     /* pth_cond_t*, pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE,   /* pth_cond_t*, long isInit */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST,   /* pth_rwlk_t* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE, /* pth_rwlk_t* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE,    /* pth_rwlk_t*, long isW */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_ACQUIRED,    /* void*, long isW */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_RELEASED,    /* void* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST, /* pth_rwlk_t* */
      _VG_USERREQ__HG_POSIX_SEM_INIT_POST,        /* sem_t*, ulong value */
      _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE,      /* sem_t* */
      _VG_USERREQ__HG_POSIX_SEM_RELEASED,         /* void* */
      _VG_USERREQ__HG_POSIX_SEM_ACQUIRED,         /* void* */
      _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE,   /* pth_bar_t*, ulong, ulong */
      _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE,   /* pth_bar_t* */
      _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE, /* pth_bar_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE,  /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST, /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE,      /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST,     /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE,   /* pth_slk_t* */
      _VG_USERREQ__HG_CLIENTREQ_UNIMP,            /* char* */
      _VG_USERREQ__HG_USERSO_SEND_PRE,        /* arbitrary UWord SO-tag */
      _VG_USERREQ__HG_USERSO_RECV_POST,       /* arbitrary UWord SO-tag */
      _VG_USERREQ__HG_USERSO_FORGET_ALL,      /* arbitrary UWord SO-tag */
      _VG_USERREQ__HG_RESERVED2,              /* Do not use */
      _VG_USERREQ__HG_RESERVED3,              /* Do not use */
      _VG_USERREQ__HG_RESERVED4,              /* Do not use */
      _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED,  /* Addr a, ulong len */
      _VG_USERREQ__HG_ARANGE_MAKE_TRACKED,    /* Addr a, ulong len */
      _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE, /* pth_bar_t*, ulong */
      _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK, /* Addr start_of_block */
      _VG_USERREQ__HG_PTHREAD_COND_INIT_POST, /* pth_cond_t*, pth_cond_attr_t*/
      _VG_USERREQ__HG_GNAT_MASTER_HOOK,       /* void*d,void*m,Word ml */
      _VG_USERREQ__HG_GNAT_MASTER_COMPLETED_HOOK, /* void*s,Word ml */
      _VG_USERREQ__HG_GET_ABITS,              /* Addr a,Addr abits, ulong len */
      _VG_USERREQ__HG_PTHREAD_CREATE_BEGIN,
      _VG_USERREQ__HG_PTHREAD_CREATE_END,
      _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE,     /* pth_mx_t*,long isTryLock */
      _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST,    /* pth_mx_t *,long tookLock */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST,   /* pth_rwlk_t*,long isW,long */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE,  /* pth_rwlk_t* */
      _VG_USERREQ__HG_POSIX_SEM_POST_PRE,         /* sem_t* */
      _VG_USERREQ__HG_POSIX_SEM_POST_POST,        /* sem_t* */
      _VG_USERREQ__HG_POSIX_SEM_WAIT_PRE,         /* sem_t* */
      _VG_USERREQ__HG_POSIX_SEM_WAIT_POST,        /* sem_t*, long tookLock */
      _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_POST,   /* pth_cond_t* */
      _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_POST,/* pth_cond_t* */
      _VG_USERREQ__HG_RTLD_BIND_GUARD,            /* int flags */
      _VG_USERREQ__HG_RTLD_BIND_CLEAR,            /* int flags */
      _VG_USERREQ__HG_GNAT_DEPENDENT_MASTER_JOIN  /* void*d, void*m */
   } Vg_TCheckClientRequest;
  121. /*----------------------------------------------------------------*/
  122. /*--- ---*/
  123. /*--- Implementation-only facilities. Not for end-user use. ---*/
  124. /*--- For end-user facilities see below (the next section in ---*/
  125. /*--- this file.) ---*/
  126. /*--- ---*/
  127. /*----------------------------------------------------------------*/
  128. /* Do a client request. These are macros rather than a functions so
  129. as to avoid having an extra frame in stack traces.
NB: these duplicate definitions in hg_intercepts.c.  But here, we
have to make do with weaker typing (no definition of Word etc) and
no assertions, whereas in hg_intercepts.c we can use those
facilities.  Obviously it's important the two sets of definitions
are kept in sync.
  135. The commented-out asserts should actually hold, but unfortunately
  136. they can't be allowed to be visible here, because that would
  137. require the end-user code to #include <assert.h>.
  138. */
  139. #define DO_CREQ_v_W(_creqF, _ty1F,_arg1F) \
  140. do { \
  141. long int _arg1; \
  142. /* assert(sizeof(_ty1F) == sizeof(long int)); */ \
  143. _arg1 = (long int)(_arg1F); \
  144. VALGRIND_DO_CLIENT_REQUEST_STMT( \
  145. (_creqF), \
  146. _arg1, 0,0,0,0); \
  147. } while (0)
  148. #define DO_CREQ_W_W(_resF, _dfltF, _creqF, _ty1F,_arg1F) \
  149. do { \
  150. long int _arg1; \
  151. /* assert(sizeof(_ty1F) == sizeof(long int)); */ \
  152. _arg1 = (long int)(_arg1F); \
  153. _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR( \
  154. (_dfltF), \
  155. (_creqF), \
  156. _arg1, 0,0,0,0); \
  157. _resF = _qzz_res; \
  158. } while (0)
  159. #define DO_CREQ_v_WW(_creqF, _ty1F,_arg1F, _ty2F,_arg2F) \
  160. do { \
  161. long int _arg1, _arg2; \
  162. /* assert(sizeof(_ty1F) == sizeof(long int)); */ \
  163. /* assert(sizeof(_ty2F) == sizeof(long int)); */ \
  164. _arg1 = (long int)(_arg1F); \
  165. _arg2 = (long int)(_arg2F); \
  166. VALGRIND_DO_CLIENT_REQUEST_STMT( \
  167. (_creqF), \
  168. _arg1,_arg2,0,0,0); \
  169. } while (0)
  170. #define DO_CREQ_v_WWW(_creqF, _ty1F,_arg1F, \
  171. _ty2F,_arg2F, _ty3F, _arg3F) \
  172. do { \
  173. long int _arg1, _arg2, _arg3; \
  174. /* assert(sizeof(_ty1F) == sizeof(long int)); */ \
  175. /* assert(sizeof(_ty2F) == sizeof(long int)); */ \
  176. /* assert(sizeof(_ty3F) == sizeof(long int)); */ \
  177. _arg1 = (long int)(_arg1F); \
  178. _arg2 = (long int)(_arg2F); \
  179. _arg3 = (long int)(_arg3F); \
  180. VALGRIND_DO_CLIENT_REQUEST_STMT( \
  181. (_creqF), \
  182. _arg1,_arg2,_arg3,0,0); \
  183. } while (0)
  184. #define DO_CREQ_W_WWW(_resF, _dfltF, _creqF, _ty1F,_arg1F, \
  185. _ty2F,_arg2F, _ty3F, _arg3F) \
  186. do { \
  187. long int _qzz_res; \
  188. long int _arg1, _arg2, _arg3; \
  189. /* assert(sizeof(_ty1F) == sizeof(long int)); */ \
  190. _arg1 = (long int)(_arg1F); \
  191. _arg2 = (long int)(_arg2F); \
  192. _arg3 = (long int)(_arg3F); \
  193. _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR( \
  194. (_dfltF), \
  195. (_creqF), \
  196. _arg1,_arg2,_arg3,0,0); \
  197. _resF = _qzz_res; \
  198. } while (0)
/* Forward the given annotation-name string to Helgrind's
   "unimplemented client request" handler.  Used by the ANNOTATE_*
   stubs below that are not (yet) implemented; per the notes above,
   triggering one causes an assertion rather than silently doing
   nothing. */
#define _HG_CLIENTREQ_UNIMP(_qzz_str)                            \
   DO_CREQ_v_W(_VG_USERREQ__HG_CLIENTREQ_UNIMP,                  \
               (char*),(_qzz_str))
  202. /*----------------------------------------------------------------*/
  203. /*--- ---*/
  204. /*--- Helgrind-native requests. These allow access to ---*/
  205. /*--- the same set of annotation primitives that are used ---*/
  206. /*--- to build the POSIX pthread wrappers. ---*/
  207. /*--- ---*/
  208. /*----------------------------------------------------------------*/
/* ----------------------------------------------------------
   For describing ordinary mutexes (non-rwlocks).  For rwlock
   descriptions see ANNOTATE_RWLOCK_* below.
   ---------------------------------------------------------- */

/* Notify here immediately after mutex creation.  _mbRec == 0 for a
   non-recursive mutex, 1 for a recursive mutex. */
#define VALGRIND_HG_MUTEX_INIT_POST(_mutex, _mbRec)              \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST,         \
                void*,(_mutex), long,(_mbRec))

/* Notify here immediately before mutex acquisition.  _isTryLock == 0
   for a normal acquisition, 1 for a "try" style acquisition. */
#define VALGRIND_HG_MUTEX_LOCK_PRE(_mutex, _isTryLock)           \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_PRE,       \
                void*,(_mutex), long,(_isTryLock))

/* Notify here immediately after a successful mutex acquisition. */
#define VALGRIND_HG_MUTEX_LOCK_POST(_mutex)                      \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_POST,       \
               void*,(_mutex))

/* Notify here immediately before a mutex release. */
#define VALGRIND_HG_MUTEX_UNLOCK_PRE(_mutex)                     \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE,         \
               void*,(_mutex))

/* Notify here immediately after a mutex release. */
#define VALGRIND_HG_MUTEX_UNLOCK_POST(_mutex)                    \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST,        \
               void*,(_mutex))

/* Notify here immediately before mutex destruction. */
#define VALGRIND_HG_MUTEX_DESTROY_PRE(_mutex)                    \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE,        \
               void*,(_mutex))
/* ----------------------------------------------------------
   For describing semaphores.
   ---------------------------------------------------------- */

/* Notify here immediately after semaphore creation.  _value is the
   semaphore's initial count. */
#define VALGRIND_HG_SEM_INIT_POST(_sem, _value)                  \
   DO_CREQ_v_WW(_VG_USERREQ__HG_POSIX_SEM_INIT_POST,             \
                void*, (_sem), unsigned long, (_value))

/* Notify here immediately after a semaphore wait (an acquire-style
   operation). */
#define VALGRIND_HG_SEM_WAIT_POST(_sem)                          \
   DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_ACQUIRED,               \
               void*,(_sem))

/* Notify here immediately before a semaphore post (a release-style
   operation). */
#define VALGRIND_HG_SEM_POST_PRE(_sem)                           \
   DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_RELEASED,               \
               void*,(_sem))

/* Notify here immediately before semaphore destruction. */
#define VALGRIND_HG_SEM_DESTROY_PRE(_sem)                        \
   DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE,            \
               void*, (_sem))
/* ----------------------------------------------------------
   For describing barriers.
   ---------------------------------------------------------- */

/* Notify here immediately before barrier creation.  _count is the
   capacity.  _resizable == 0 means the barrier may not be resized, 1
   means it may be. */
#define VALGRIND_HG_BARRIER_INIT_PRE(_bar, _count, _resizable)   \
   DO_CREQ_v_WWW(_VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE,       \
                 void*,(_bar),                                   \
                 unsigned long,(_count),                         \
                 unsigned long,(_resizable))

/* Notify here immediately before arrival at a barrier. */
#define VALGRIND_HG_BARRIER_WAIT_PRE(_bar)                       \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE,         \
               void*,(_bar))

/* Notify here immediately before a resize (change of barrier
   capacity).  If _newcount >= the existing capacity, then there is
   no change in the state of any threads waiting at the barrier.  If
   _newcount < the existing capacity, and >= _newcount threads are
   currently waiting at the barrier, then this notification is
   considered to also have the effect of telling the checker that all
   waiting threads have now moved past the barrier.  (I can't think
   of any other sane semantics.) */
#define VALGRIND_HG_BARRIER_RESIZE_PRE(_bar, _newcount)          \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE,      \
                void*,(_bar),                                    \
                unsigned long,(_newcount))

/* Notify here immediately before barrier destruction. */
#define VALGRIND_HG_BARRIER_DESTROY_PRE(_bar)                    \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE,      \
               void*,(_bar))
/* ----------------------------------------------------------
   For describing memory ownership changes.
   ---------------------------------------------------------- */

/* Clean memory state.  This makes Helgrind forget everything it knew
   about the specified memory range.  Effectively this announces that
   the specified memory range now "belongs" to the calling thread, so
   that: (1) the calling thread can access it safely without
   synchronisation, and (2) all other threads must sync with this one
   to access it safely.  This is particularly useful for memory
   allocators that wish to recycle memory. */
#define VALGRIND_HG_CLEAN_MEMORY(_qzz_start, _qzz_len)           \
   DO_CREQ_v_WW(VG_USERREQ__HG_CLEAN_MEMORY,                     \
                void*,(_qzz_start),                              \
                unsigned long,(_qzz_len))

/* The same, but for the heap block starting at _qzz_blockstart.
   This allows painting when we only know the address of an object,
   but not its size, which is sometimes the case in C++ code
   involving inheritance, and in which RTTI is not, for whatever
   reason, available.  Returns the number of bytes painted, which can
   be zero for a zero-sized block.  Hence, return values >= 0
   indicate success (the block was found), the value -1 indicates
   block not found, and -2 is returned when not running on Helgrind.
   (GCC/Clang statement-expression; the -2 is the default delivered
   when the client request is not intercepted.) */
#define VALGRIND_HG_CLEAN_MEMORY_HEAPBLOCK(_qzz_blockstart)      \
   (__extension__                                                \
   ({long int _npainted;                                         \
     DO_CREQ_W_W(_npainted, (-2)/*default*/,                     \
                 _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK,         \
                 void*,(_qzz_blockstart));                       \
     _npainted;                                                  \
   }))
/* ----------------------------------------------------------
   For error control.
   ---------------------------------------------------------- */

/* Tell Helgrind that an address range is not to be "tracked" until
   further notice.  This puts it in the NOACCESS state, in which case
   we ignore all reads and writes to it.  Useful for ignoring ranges
   of memory where there might be races we don't want to see.  If the
   memory is subsequently reallocated via malloc/new/stack
   allocation, then it is put back in the trackable state.  Hence it
   is safe in the situation where checking is disabled, the
   containing area is deallocated and later reallocated for some
   other purpose. */
#define VALGRIND_HG_DISABLE_CHECKING(_qzz_start, _qzz_len)       \
   DO_CREQ_v_WW(_VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED,           \
                void*,(_qzz_start),                              \
                unsigned long,(_qzz_len))

/* And put it back into the normal "tracked" state, that is, make it
   once again subject to the normal race-checking machinery.  This
   puts it in the same state as new memory allocated by this thread
   -- that is, basically owned exclusively by this thread. */
#define VALGRIND_HG_ENABLE_CHECKING(_qzz_start, _qzz_len)        \
   DO_CREQ_v_WW(_VG_USERREQ__HG_ARANGE_MAKE_TRACKED,             \
                void*,(_qzz_start),                              \
                unsigned long,(_qzz_len))

/* Checks the accessibility bits for addresses [zza..zza+zznbytes-1].
   If the zzabits array is provided, copy the accessibility bits into
   zzabits.
   Return values:
     -2 if not running on helgrind
     -1 if any part of zzabits is not addressable
     >= 0 : success; the number of addressable bytes found.
   So, to check that a whole range is addressable, check
     VALGRIND_HG_GET_ABITS(addr,NULL,len) == len.
   In addition, if you want to examine the addressability of each
   byte of the range, provide a non-NULL second argument pointing to
   an array of unsigned char of length len.
   Addressable bytes are indicated with 0xff; non-addressable bytes
   with 0x00. */
#define VALGRIND_HG_GET_ABITS(zza,zzabits,zznbytes)              \
   (__extension__                                                \
   ({long int _res;                                              \
     DO_CREQ_W_WWW(_res, (-2)/*default*/,                        \
                   _VG_USERREQ__HG_GET_ABITS,                    \
                   void*,(zza), void*,(zzabits),                 \
                   unsigned long,(zznbytes));                    \
     _res;                                                       \
   }))
/* End-user request for Ada applications compiled with GNAT.
   Helgrind understands the Ada concept of Ada task dependencies and
   terminations.  See Ada Reference Manual section 9.3 "Task
   Dependence - Termination of Tasks".
   However, in some cases, the master of (terminated) tasks completes
   only when the application exits.  An example of this is
   dynamically allocated tasks with an access type defined at Library
   Level.  By default, the state of such tasks in Helgrind will be
   'exited but join not done yet'.  Many tasks in such a state do
   however cause Helgrind CPU and memory use to increase
   significantly.
   VALGRIND_HG_GNAT_DEPENDENT_MASTER_JOIN can be used to indicate to
   Helgrind that a not-yet-completed master has however already
   'seen' the termination of a dependent: this is conceptually the
   same as a pthread_join and causes the cleanup of the dependent, as
   done by Helgrind when a master completes.
   This avoids the overhead in Helgrind otherwise caused by such
   tasks.
   A typical usage for a master to indicate it has conceptually done
   a join with a dependent task before the master completes is:
      while not Dep_Task'Terminated loop
         ... do whatever to wait for Dep_Task termination.
      end loop;
      VALGRIND_HG_GNAT_DEPENDENT_MASTER_JOIN
        (Dep_Task'Identity,
         Ada.Task_Identification.Current_Task);
   Note that VALGRIND_HG_GNAT_DEPENDENT_MASTER_JOIN should be a
   binding to a C function built with the below macro. */
#define VALGRIND_HG_GNAT_DEPENDENT_MASTER_JOIN(_qzz_dep, _qzz_master) \
   DO_CREQ_v_WW(_VG_USERREQ__HG_GNAT_DEPENDENT_MASTER_JOIN,      \
                void*,(_qzz_dep),                                \
                void*,(_qzz_master))
  399. /*----------------------------------------------------------------*/
  400. /*--- ---*/
  401. /*--- ThreadSanitizer-compatible requests ---*/
  402. /*--- (mostly unimplemented) ---*/
  403. /*--- ---*/
  404. /*----------------------------------------------------------------*/
  405. /* A quite-broad set of annotations, as used in the ThreadSanitizer
  406. project. This implementation aims to be a (source-level)
  407. compatible implementation of the macros defined in:
  408. http://code.google.com/p/data-race-test/source
  409. /browse/trunk/dynamic_annotations/dynamic_annotations.h
  410. (some of the comments below are taken from the above file)
  411. The implementation here is very incomplete, and intended as a
  412. starting point. Many of the macros are unimplemented. Rather than
  413. allowing unimplemented macros to silently do nothing, they cause an
  414. assertion. Intention is to implement them on demand.
  415. The major use of these macros is to make visible to race detectors,
  416. the behaviour (effects) of user-implemented synchronisation
  417. primitives, that the detectors could not otherwise deduce from the
  418. normal observation of pthread etc calls.
  419. Some of the macros are no-ops in Helgrind. That's because Helgrind
  420. is a pure happens-before detector, whereas ThreadSanitizer uses a
  421. hybrid lockset and happens-before scheme, which requires more
  422. accurate annotations for correct operation.
  423. The macros are listed in the same order as in dynamic_annotations.h
  424. (URL just above).
  425. I should point out that I am less than clear about the intended
  426. semantics of quite a number of them. Comments and clarifications
  427. welcomed!
  428. */
/* ----------------------------------------------------------------
   These four allow description of user-level condition variables,
   apparently in the style of POSIX's pthread_cond_t.  Currently
   unimplemented and will assert.
   ----------------------------------------------------------------
*/

/* Report that wait on the condition variable at address CV has
   succeeded and the lock at address LOCK is now held.  CV and LOCK
   are completely arbitrary memory addresses which presumably mean
   something to the application, but are meaningless to Helgrind. */
#define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock)                     \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_LOCK_WAIT")

/* Report that wait on the condition variable at CV has succeeded.
   Variant w/o lock. */
#define ANNOTATE_CONDVAR_WAIT(cv)                                \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_WAIT")

/* Report that we are about to signal on the condition variable at
   address CV. */
#define ANNOTATE_CONDVAR_SIGNAL(cv)                              \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_SIGNAL")

/* Report that we are about to signal_all on the condition variable
   at CV. */
#define ANNOTATE_CONDVAR_SIGNAL_ALL(cv)                          \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_SIGNAL_ALL")
/* ----------------------------------------------------------------
   Create completely arbitrary happens-before edges between threads.

   If threads T1 .. Tn all do ANNOTATE_HAPPENS_BEFORE(obj) and later
   (w.r.t. some notional global clock for the computation) thread Tm
   does ANNOTATE_HAPPENS_AFTER(obj), then Helgrind will regard all
   memory accesses done by T1 .. Tn before the ..BEFORE.. call as
   happening-before all memory accesses done by Tm after the
   ..AFTER.. call.  Hence Helgrind won't complain about races if Tm's
   accesses afterwards are to the same locations as accesses before
   by any of T1 .. Tn.

   OBJ is a machine word (unsigned long, or void*), is completely
   arbitrary, and denotes the identity of some synchronisation object
   you're modelling.

   You must do the _BEFORE call just before the real sync event on
   the signaller's side, and _AFTER just after the real sync event on
   the waiter's side.

   If none of the rest of these macros make sense to you, at least
   take the time to understand these two.  They form the very essence
   of describing arbitrary inter-thread synchronisation events to
   Helgrind.  You can get a long way just with them alone.

   See also, extensive discussion on semantics of this in
   https://bugs.kde.org/show_bug.cgi?id=243935

   ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj) is interim until such time
   as bug 243935 is fully resolved.  It instructs Helgrind to forget
   about any ANNOTATE_HAPPENS_BEFORE calls on the specified object,
   in effect putting it back in its original state.  Once in that
   state, a use of ANNOTATE_HAPPENS_AFTER on it has no effect on the
   calling thread.

   An implementation may optionally release resources it has
   associated with 'obj' when ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj)
   happens.  Users are recommended to use
   ANNOTATE_HAPPENS_BEFORE_FORGET_ALL to indicate when a
   synchronisation object is no longer needed, so as to avoid
   potential indefinite resource leaks.
   ----------------------------------------------------------------
*/
#define ANNOTATE_HAPPENS_BEFORE(obj)                             \
   DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_SEND_PRE, void*,(obj))

#define ANNOTATE_HAPPENS_AFTER(obj)                              \
   DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_RECV_POST, void*,(obj))

#define ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj)                  \
   DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_FORGET_ALL, void*,(obj))
/* ----------------------------------------------------------------
   Memory publishing.  The TSan sources say:

     Report that the bytes in the range [pointer, pointer+size) are
     about to be published safely.  The race checker will create a
     happens-before arc from the call
     ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) to subsequent
     accesses to this memory.

   I'm not sure I understand what this means exactly, nor whether it
   is relevant for a pure h-b detector.  Leaving unimplemented for
   now.
   ----------------------------------------------------------------
*/
#define ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size)             \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PUBLISH_MEMORY_RANGE")

/* DEPRECATED. Don't use it. */
/* #define ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size) */

/* DEPRECATED. Don't use it. */
/* #define ANNOTATE_SWAP_MEMORY_RANGE(pointer, size) */
/* ----------------------------------------------------------------
   TSan sources say:

     Instruct the tool to create a happens-before arc between
     MU->Unlock() and MU->Lock().  This annotation may slow down the
     race detector; normally it is used only when it would be
     difficult to annotate each of the mutex's critical sections
     individually using the annotations above.

   If MU is a posix pthread_mutex_t then Helgrind will do this
   anyway.  In any case, leave as unimp for now.  I'm unsure about
   the intended behaviour.
   ----------------------------------------------------------------
*/
#define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu)                   \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX")

/* Deprecated.  Use ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX. */
/* #define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) */
/* ----------------------------------------------------------------
   TSan sources say:

     Annotations useful when defining memory allocators, or when
     memory that was protected in one way starts to be protected in
     another.

     Report that new memory at "address" of size "size" has been
     allocated.  This might be used when the memory has been
     retrieved from a free list and is about to be reused, or when
     the locking discipline for a variable changes.

   AFAICS this is the same as VALGRIND_HG_CLEAN_MEMORY.
   ----------------------------------------------------------------
*/
#define ANNOTATE_NEW_MEMORY(address, size)                       \
   VALGRIND_HG_CLEAN_MEMORY((address), (size))
  542. /* ----------------------------------------------------------------
  543. TSan sources say:
  544. Annotations useful when defining FIFO queues that transfer data
  545. between threads.
  546. All unimplemented. Am not claiming to understand this (yet).
  547. ----------------------------------------------------------------
  548. */
  549. /* Report that the producer-consumer queue object at address PCQ has
  550. been created. The ANNOTATE_PCQ_* annotations should be used only
  551. for FIFO queues. For non-FIFO queues use ANNOTATE_HAPPENS_BEFORE
  552. (for put) and ANNOTATE_HAPPENS_AFTER (for get). */
  553. #define ANNOTATE_PCQ_CREATE(pcq) \
  554. _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_CREATE")
  555. /* Report that the queue at address PCQ is about to be destroyed. */
  556. #define ANNOTATE_PCQ_DESTROY(pcq) \
  557. _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_DESTROY")
  558. /* Report that we are about to put an element into a FIFO queue at
  559. address PCQ. */
  560. #define ANNOTATE_PCQ_PUT(pcq) \
  561. _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_PUT")
  562. /* Report that we've just got an element from a FIFO queue at address
  563. PCQ. */
  564. #define ANNOTATE_PCQ_GET(pcq) \
  565. _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_GET")
/* ----------------------------------------------------------------
   Annotations that suppress errors.  It is usually better to express
   the program's synchronization using the other annotations, but
   these can be used when all else fails.

   Currently these are all unimplemented.  I can't think of a simple
   way to implement them without at least some performance overhead.
   ----------------------------------------------------------------
*/
/* Report that we may have a benign race at "pointer", with size
   "sizeof(*(pointer))".  "pointer" must be a non-void* pointer.  Insert at the
   point where "pointer" has been allocated, preferably close to the point
   where the race happens.  See also ANNOTATE_BENIGN_RACE_STATIC.

   XXX: what's this actually supposed to do?  And what's the type of
   DESCRIPTION?  When does the annotation stop having an effect? */
/* Unimplemented: emits a client-request warning only; both arguments
   are ignored.  (Contrast with ANNOTATE_BENIGN_RACE_SIZED below,
   which actually disables checking for a range.) */
#define ANNOTATE_BENIGN_RACE(pointer, description) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BENIGN_RACE")
  583. /* Same as ANNOTATE_BENIGN_RACE(address, description), but applies to
  584. the memory range [address, address+size). */
  585. #define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
  586. VALGRIND_HG_DISABLE_CHECKING(address, size)
/* Request the analysis tool to ignore all reads in the current thread
   until ANNOTATE_IGNORE_READS_END is called.  Useful to ignore
   intentional racey reads, while still checking other reads and all
   writes. */
/* Unimplemented in Helgrind: each of the four BEGIN/END macros below
   merely emits a client-request warning. */
#define ANNOTATE_IGNORE_READS_BEGIN() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_READS_BEGIN")

/* Stop ignoring reads. */
#define ANNOTATE_IGNORE_READS_END() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_READS_END")

/* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes. */
#define ANNOTATE_IGNORE_WRITES_BEGIN() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_WRITES_BEGIN")

/* Stop ignoring writes. */
#define ANNOTATE_IGNORE_WRITES_END() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_WRITES_END")

/* Start ignoring all memory accesses (reads and writes). */
/* do/while(0) makes this multi-statement macro behave as a single
   statement at the call site. */
#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
   do { \
   ANNOTATE_IGNORE_READS_BEGIN(); \
   ANNOTATE_IGNORE_WRITES_BEGIN(); \
   } while (0)

/* Stop ignoring all memory accesses. */
/* Note the END calls run in the reverse order of the BEGIN calls
   above (writes first, then reads), nesting the two modes cleanly. */
#define ANNOTATE_IGNORE_READS_AND_WRITES_END() \
   do { \
   ANNOTATE_IGNORE_WRITES_END(); \
   ANNOTATE_IGNORE_READS_END(); \
   } while (0)
  614. /* ----------------------------------------------------------------
  615. Annotations useful for debugging.
  616. Again, so for unimplemented, partly for performance reasons.
  617. ----------------------------------------------------------------
  618. */
  619. /* Request to trace every access to ADDRESS. */
  620. #define ANNOTATE_TRACE_MEMORY(address) \
  621. _HG_CLIENTREQ_UNIMP("ANNOTATE_TRACE_MEMORY")
  622. /* Report the current thread name to a race detector. */
  623. #define ANNOTATE_THREAD_NAME(name) \
  624. _HG_CLIENTREQ_UNIMP("ANNOTATE_THREAD_NAME")
/* ----------------------------------------------------------------
   Annotations for describing behaviour of user-implemented lock
   primitives.  In all cases, the LOCK argument is a completely
   arbitrary machine word (unsigned long, or void*) and can be any
   value which gives a unique identity to the lock objects being
   modelled.

   We just pretend they're ordinary posix rwlocks.  That'll probably
   give some rather confusing wording in error messages, claiming that
   the arbitrary LOCK values are pthread_rwlock_t*'s, when in fact
   they are not.  Ah well.
   ----------------------------------------------------------------
*/
/* Report that a lock has just been created at address LOCK.  Modelled
   as a pthread_rwlock_t post-initialisation event. */
#define ANNOTATE_RWLOCK_CREATE(lock) \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST, \
               void*,(lock))

/* Report that the lock at address LOCK is about to be destroyed. */
#define ANNOTATE_RWLOCK_DESTROY(lock) \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE, \
               void*,(lock))

/* Report that the lock at address LOCK has just been acquired.
   is_w=1 for writer lock, is_w=0 for reader lock. */
#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_RWLOCK_ACQUIRED, \
                void*,(lock), unsigned long,(is_w))

/* Report that the lock at address LOCK is about to be released. */
/* NOTE(review): unlike ACQUIRED, this issues a one-word request
   (DO_CREQ_v_W), so is_w is accepted for API compatibility but never
   reaches the tool -- the reader/writer status of the release is not
   recorded. */
#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_RELEASED, \
               void*,(lock)) /* is_w is ignored */
/* -------------------------------------------------------------
   Annotations useful when implementing barriers.  They are not
   normally needed by modules that merely use barriers.
   The "barrier" argument is a pointer to the barrier object.
   ----------------------------------------------------------------
*/
/* Report that the "barrier" has been initialized with initial
   "count".  If 'reinitialization_allowed' is true, initialization is
   allowed to happen multiple times w/o calling barrier_destroy() */
/* Unimplemented: emits a client-request warning; all three arguments
   are ignored. */
#define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_INIT")
  665. /* Report that we are about to enter barrier_wait("barrier"). */
  666. #define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) \
  667. _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_DESTROY")
  668. /* Report that we just exited barrier_wait("barrier"). */
  669. #define ANNOTATE_BARRIER_WAIT_AFTER(barrier) \
  670. _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_DESTROY")
/* Report that the "barrier" has been destroyed. */
/* Unimplemented: emits a client-request warning; the argument is
   ignored. */
#define ANNOTATE_BARRIER_DESTROY(barrier) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_DESTROY")
  674. /* ----------------------------------------------------------------
  675. Annotations useful for testing race detectors.
  676. ----------------------------------------------------------------
  677. */
  678. /* Report that we expect a race on the variable at ADDRESS. Use only
  679. in unit tests for a race detector. */
  680. #define ANNOTATE_EXPECT_RACE(address, description) \
  681. _HG_CLIENTREQ_UNIMP("ANNOTATE_EXPECT_RACE")
  682. /* A no-op. Insert where you like to test the interceptors. */
  683. #define ANNOTATE_NO_OP(arg) \
  684. _HG_CLIENTREQ_UNIMP("ANNOTATE_NO_OP")
  685. /* Force the race detector to flush its state. The actual effect depends on
  686. * the implementation of the detector. */
  687. #define ANNOTATE_FLUSH_STATE() \
  688. _HG_CLIENTREQ_UNIMP("ANNOTATE_FLUSH_STATE")
  689. #endif /* __HELGRIND_H */