OLD | NEW |
(Empty) | |
| 1 /* Copyright (c) 2008-2009, Google Inc. |
| 2 * All rights reserved. |
| 3 * |
| 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are |
| 6 * met: |
| 7 * |
| 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Neither the name of Google Inc. nor the names of its |
| 11 * contributors may be used to endorse or promote products derived from |
| 12 * this software without specific prior written permission. |
| 13 * |
| 14 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 15 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 16 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 17 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 18 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 19 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 20 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 25 * |
| 26 * --- |
| 27 * Author: Kostya Serebryany |
| 28 */ |
| 29 |
/* This file defines dynamic annotations for use with dynamic analysis
   tools such as valgrind, PIN, etc.
| 32 |
| 33 Dynamic annotation is a source code annotation that affects |
| 34 the generated code (that is, the annotation is not a comment). |
| 35 Each such annotation is attached to a particular |
| 36 instruction and/or to a particular object (address) in the program. |
| 37 |
| 38 The annotations that should be used by users are macros in all upper-case |
| 39 (e.g., ANNOTATE_NEW_MEMORY). |
| 40 |
| 41 Actual implementation of these macros may differ depending on the |
| 42 dynamic analysis tool being used. |
| 43 |
| 44 See http://code.google.com/p/data-race-test/ for more information. |
| 45 |
| 46 This file supports the following dynamic analysis tools: |
| 47 - None (DYNAMIC_ANNOTATIONS_ENABLED is not defined or zero). |
| 48 Macros are defined empty. |
| 49 - ThreadSanitizer, Helgrind, DRD (DYNAMIC_ANNOTATIONS_ENABLED is 1). |
| 50 Macros are defined as calls to non-inlinable empty functions |
| 51 that are intercepted by Valgrind. */ |
| 52 |
#ifndef __DYNAMIC_ANNOTATIONS_H__
#define __DYNAMIC_ANNOTATIONS_H__

/* If the build does not set DYNAMIC_ANNOTATIONS_ENABLED, default to 0:
   every annotation macro in this header then expands to nothing. */
#ifndef DYNAMIC_ANNOTATIONS_ENABLED
# define DYNAMIC_ANNOTATIONS_ENABLED 0
#endif
#if DYNAMIC_ANNOTATIONS_ENABLED != 0

/* Each macro below expands to a call into the annotation runtime (these
   functions are declared near the end of this header and intercepted by the
   analysis tool); __FILE__ and __LINE__ are forwarded so tool reports point
   at the annotation site. */

/* -------------------------------------------------------------
   Annotations useful when implementing condition variables such as CondVar,
   using conditional critical sections (Await/LockWhen) and when constructing
   user-defined synchronization mechanisms.

   The annotations ANNOTATE_HAPPENS_BEFORE() and ANNOTATE_HAPPENS_AFTER() can
   be used to define happens-before arcs in user-defined synchronization
   mechanisms: the race detector will infer an arc from the former to the
   latter when they share the same argument pointer.

   Example 1 (reference counting):

   void Unref() {
     ANNOTATE_HAPPENS_BEFORE(&refcount_);
     if (AtomicDecrementByOne(&refcount_) == 0) {
       ANNOTATE_HAPPENS_AFTER(&refcount_);
       delete this;
     }
   }

   Example 2 (message queue):

   void MyQueue::Put(Type *e) {
     MutexLock lock(&mu_);
     ANNOTATE_HAPPENS_BEFORE(e);
     PutElementIntoMyQueue(e);
   }

   Type *MyQueue::Get() {
     MutexLock lock(&mu_);
     Type *e = GetElementFromMyQueue();
     ANNOTATE_HAPPENS_AFTER(e);
     return e;
   }

   Note: when possible, please use the existing reference counting and message
   queue implementations instead of inventing new ones. */

/* Report that wait on the condition variable at address "cv" has succeeded
   and the lock at address "lock" is held. */
#define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) \
  AnnotateCondVarWait(__FILE__, __LINE__, cv, lock)

/* Report that wait on the condition variable at "cv" has succeeded. Variant
   w/o lock. */
#define ANNOTATE_CONDVAR_WAIT(cv) \
  AnnotateCondVarWait(__FILE__, __LINE__, cv, NULL)

/* Report that we are about to signal on the condition variable at address
   "cv". */
#define ANNOTATE_CONDVAR_SIGNAL(cv) \
  AnnotateCondVarSignal(__FILE__, __LINE__, cv)

/* Report that we are about to signal_all on the condition variable at address
   "cv". */
#define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) \
  AnnotateCondVarSignalAll(__FILE__, __LINE__, cv)

/* Annotations for user-defined synchronization mechanisms.
   These are thin aliases: signal = happens-before edge source,
   wait = happens-before edge sink. */
#define ANNOTATE_HAPPENS_BEFORE(obj) ANNOTATE_CONDVAR_SIGNAL(obj)
#define ANNOTATE_HAPPENS_AFTER(obj) ANNOTATE_CONDVAR_WAIT(obj)

/* Report that the bytes in the range [pointer, pointer+size) are about
   to be published safely. The race checker will create a happens-before
   arc from the call ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) to
   subsequent accesses to this memory.
   Note: this annotation may not work properly if the race detector uses
   sampling, i.e. does not observe all memory accesses.
 */
#define ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) \
  AnnotatePublishMemoryRange(__FILE__, __LINE__, pointer, size)

/* DEPRECATED. Don't use it. */
#define ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size) \
  AnnotateUnpublishMemoryRange(__FILE__, __LINE__, pointer, size)

/* DEPRECATED. Don't use it. */
#define ANNOTATE_SWAP_MEMORY_RANGE(pointer, size) \
  do { \
    ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size); \
    ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size); \
  } while (0)

/* Instruct the tool to create a happens-before arc between mu->Unlock() and
   mu->Lock(). This annotation may slow down the race detector and hide real
   races. Normally it is used only when it would be difficult to annotate each
   of the mutex's critical sections individually using the annotations above.
   This annotation makes sense only for hybrid race detectors. For pure
   happens-before detectors this is a no-op. For more details see
   http://code.google.com/p/data-race-test/wiki/PureHappensBeforeVsHybrid . */
#define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) \
  AnnotateMutexIsUsedAsCondVar(__FILE__, __LINE__, mu)

/* Deprecated. Use ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX. */
#define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) \
  AnnotateMutexIsUsedAsCondVar(__FILE__, __LINE__, mu)

/* -------------------------------------------------------------
   Annotations useful when defining memory allocators, or when memory that
   was protected in one way starts to be protected in another. */

/* Report that a new memory at "address" of size "size" has been allocated.
   This might be used when the memory has been retrieved from a free list and
   is about to be reused, or when the locking discipline for a variable
   changes. */
#define ANNOTATE_NEW_MEMORY(address, size) \
  AnnotateNewMemory(__FILE__, __LINE__, address, size)

/* -------------------------------------------------------------
   Annotations useful when defining FIFO queues that transfer data between
   threads. */

/* Report that the producer-consumer queue (such as ProducerConsumerQueue) at
   address "pcq" has been created. The ANNOTATE_PCQ_* annotations
   should be used only for FIFO queues. For non-FIFO queues use
   ANNOTATE_HAPPENS_BEFORE (for put) and ANNOTATE_HAPPENS_AFTER (for get). */
#define ANNOTATE_PCQ_CREATE(pcq) \
  AnnotatePCQCreate(__FILE__, __LINE__, pcq)

/* Report that the queue at address "pcq" is about to be destroyed. */
#define ANNOTATE_PCQ_DESTROY(pcq) \
  AnnotatePCQDestroy(__FILE__, __LINE__, pcq)

/* Report that we are about to put an element into a FIFO queue at address
   "pcq". */
#define ANNOTATE_PCQ_PUT(pcq) \
  AnnotatePCQPut(__FILE__, __LINE__, pcq)

/* Report that we've just got an element from a FIFO queue at address
   "pcq". */
#define ANNOTATE_PCQ_GET(pcq) \
  AnnotatePCQGet(__FILE__, __LINE__, pcq)

/* -------------------------------------------------------------
   Annotations that suppress errors. It is usually better to express the
   program's synchronization using the other annotations, but these can
   be used when all else fails. */

/* Report that we may have a benign race at "pointer", with size
   "sizeof(*(pointer))". "pointer" must be a non-void* pointer. Insert at the
   point where "pointer" has been allocated, preferably close to the point
   where the race happens. See also ANNOTATE_BENIGN_RACE_STATIC. */
#define ANNOTATE_BENIGN_RACE(pointer, description) \
  AnnotateBenignRaceSized(__FILE__, __LINE__, pointer, \
                          sizeof(*(pointer)), description)

/* Same as ANNOTATE_BENIGN_RACE(address, description), but applies to
   the memory range [address, address+size). */
#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
  AnnotateBenignRaceSized(__FILE__, __LINE__, address, size, description)

/* Request the analysis tool to ignore all reads in the current thread
   until ANNOTATE_IGNORE_READS_END is called.
   Useful to ignore intentional racey reads, while still checking
   other reads and all writes.
   See also ANNOTATE_UNPROTECTED_READ. */
#define ANNOTATE_IGNORE_READS_BEGIN() \
  AnnotateIgnoreReadsBegin(__FILE__, __LINE__)

/* Stop ignoring reads. */
#define ANNOTATE_IGNORE_READS_END() \
  AnnotateIgnoreReadsEnd(__FILE__, __LINE__)

/* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes. */
#define ANNOTATE_IGNORE_WRITES_BEGIN() \
  AnnotateIgnoreWritesBegin(__FILE__, __LINE__)

/* Stop ignoring writes. */
#define ANNOTATE_IGNORE_WRITES_END() \
  AnnotateIgnoreWritesEnd(__FILE__, __LINE__)
| 232 |
/* Start ignoring all memory accesses (reads and writes) in the current
   thread, by combining the READS and WRITES annotations above.
   Note: the original definitions ended in "}while(0)\" -- the trailing
   backslash continued the macro onto the following line, silently absorbing
   whatever came next into the macro definition. The backslashes are removed
   here and spacing normalized. */
#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
  do { \
    ANNOTATE_IGNORE_READS_BEGIN(); \
    ANNOTATE_IGNORE_WRITES_BEGIN(); \
  } while (0)

/* Stop ignoring all memory accesses. Writes are un-ignored first,
   mirroring (in reverse) the order used by the BEGIN macro. */
#define ANNOTATE_IGNORE_READS_AND_WRITES_END() \
  do { \
    ANNOTATE_IGNORE_WRITES_END(); \
    ANNOTATE_IGNORE_READS_END(); \
  } while (0)
| 246 |
/* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore synchronization events:
   RWLOCK* and CONDVAR*. Pair with ANNOTATE_IGNORE_SYNC_END in the same
   thread. */
#define ANNOTATE_IGNORE_SYNC_BEGIN() \
  AnnotateIgnoreSyncBegin(__FILE__, __LINE__)

/* Stop ignoring sync events. */
#define ANNOTATE_IGNORE_SYNC_END() \
  AnnotateIgnoreSyncEnd(__FILE__, __LINE__)


/* Enable (enable!=0) or disable (enable==0) race detection for all threads.
   This annotation could be useful if you want to skip expensive race analysis
   during some period of program execution, e.g. during initialization. */
#define ANNOTATE_ENABLE_RACE_DETECTION(enable) \
  AnnotateEnableRaceDetection(__FILE__, __LINE__, enable)

/* -------------------------------------------------------------
   Annotations useful for debugging. */

/* Request to trace every access to "address". */
#define ANNOTATE_TRACE_MEMORY(address) \
  AnnotateTraceMemory(__FILE__, __LINE__, address)

/* Report the current thread name to a race detector. */
#define ANNOTATE_THREAD_NAME(name) \
  AnnotateThreadName(__FILE__, __LINE__, name)

/* -------------------------------------------------------------
   Annotations useful when implementing locks. They are not
   normally needed by modules that merely use locks.
   The "lock" argument is a pointer to the lock object. */

/* Report that a lock has been created at address "lock". */
#define ANNOTATE_RWLOCK_CREATE(lock) \
  AnnotateRWLockCreate(__FILE__, __LINE__, lock)

/* Report that the lock at address "lock" is about to be destroyed. */
#define ANNOTATE_RWLOCK_DESTROY(lock) \
  AnnotateRWLockDestroy(__FILE__, __LINE__, lock)

/* Report that the lock at address "lock" has been acquired.
   is_w=1 for writer lock, is_w=0 for reader lock. */
#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
  AnnotateRWLockAcquired(__FILE__, __LINE__, lock, is_w)

/* Report that the lock at address "lock" is about to be released. */
#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
  AnnotateRWLockReleased(__FILE__, __LINE__, lock, is_w)

/* -------------------------------------------------------------
   Annotations useful when implementing barriers. They are not
   normally needed by modules that merely use barriers.
   The "barrier" argument is a pointer to the barrier object. */

/* Report that the "barrier" has been initialized with initial "count".
   If 'reinitialization_allowed' is true, initialization is allowed to happen
   multiple times w/o calling barrier_destroy() */
#define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) \
  AnnotateBarrierInit(__FILE__, __LINE__, barrier, count, \
                      reinitialization_allowed)

/* Report that we are about to enter barrier_wait("barrier"). */
#define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) \
  AnnotateBarrierWaitBefore(__FILE__, __LINE__, barrier)

/* Report that we just exited barrier_wait("barrier"). */
#define ANNOTATE_BARRIER_WAIT_AFTER(barrier) \
  AnnotateBarrierWaitAfter(__FILE__, __LINE__, barrier)

/* Report that the "barrier" has been destroyed. */
#define ANNOTATE_BARRIER_DESTROY(barrier) \
  AnnotateBarrierDestroy(__FILE__, __LINE__, barrier)

/* -------------------------------------------------------------
   Annotations useful for testing race detectors. */

/* Report that we expect a race on the variable at "address".
   Use only in unit tests for a race detector. */
#define ANNOTATE_EXPECT_RACE(address, description) \
  AnnotateExpectRace(__FILE__, __LINE__, address, description)

/* A no-op. Insert where you like to test the interceptors. */
#define ANNOTATE_NO_OP(arg) \
  AnnotateNoOp(__FILE__, __LINE__, arg)

/* Force the race detector to flush its state. The actual effect depends on
 * the implementation of the detector. */
#define ANNOTATE_FLUSH_STATE() \
  AnnotateFlushState(__FILE__, __LINE__)
| 337 |
| 338 #else /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */ |
| 339 |
/* No-op definitions, used when DYNAMIC_ANNOTATIONS_ENABLED == 0: every
   annotation expands to nothing, so annotated code compiles and runs with
   zero overhead. Parameter names below mirror the enabled variants above
   (previously some diverged, e.g. "address" vs "pointer"). */
#define ANNOTATE_RWLOCK_CREATE(lock) /* empty */
#define ANNOTATE_RWLOCK_DESTROY(lock) /* empty */
#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) /* empty */
#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) /* empty */
#define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) /* */
#define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) /* empty */
#define ANNOTATE_BARRIER_WAIT_AFTER(barrier) /* empty */
#define ANNOTATE_BARRIER_DESTROY(barrier) /* empty */
#define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) /* empty */
#define ANNOTATE_CONDVAR_WAIT(cv) /* empty */
#define ANNOTATE_CONDVAR_SIGNAL(cv) /* empty */
#define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) /* empty */
#define ANNOTATE_HAPPENS_BEFORE(obj) /* empty */
#define ANNOTATE_HAPPENS_AFTER(obj) /* empty */
#define ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) /* empty */
#define ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size) /* empty */
#define ANNOTATE_SWAP_MEMORY_RANGE(pointer, size) /* empty */
#define ANNOTATE_PCQ_CREATE(pcq) /* empty */
#define ANNOTATE_PCQ_DESTROY(pcq) /* empty */
#define ANNOTATE_PCQ_PUT(pcq) /* empty */
#define ANNOTATE_PCQ_GET(pcq) /* empty */
#define ANNOTATE_NEW_MEMORY(address, size) /* empty */
#define ANNOTATE_EXPECT_RACE(address, description) /* empty */
#define ANNOTATE_BENIGN_RACE(pointer, description) /* empty */
#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) /* empty */
#define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) /* empty */
#define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) /* empty */
#define ANNOTATE_TRACE_MEMORY(address) /* empty */
#define ANNOTATE_THREAD_NAME(name) /* empty */
#define ANNOTATE_IGNORE_READS_BEGIN() /* empty */
#define ANNOTATE_IGNORE_READS_END() /* empty */
#define ANNOTATE_IGNORE_WRITES_BEGIN() /* empty */
#define ANNOTATE_IGNORE_WRITES_END() /* empty */
#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() /* empty */
#define ANNOTATE_IGNORE_READS_AND_WRITES_END() /* empty */
#define ANNOTATE_IGNORE_SYNC_BEGIN() /* empty */
#define ANNOTATE_IGNORE_SYNC_END() /* empty */
#define ANNOTATE_ENABLE_RACE_DETECTION(enable) /* empty */
#define ANNOTATE_NO_OP(arg) /* empty */
#define ANNOTATE_FLUSH_STATE() /* empty */
| 380 |
| 381 #endif /* DYNAMIC_ANNOTATIONS_ENABLED */ |
| 382 |
/* Use the macros above rather than using these functions directly. The
   functions are deliberately non-inlinable no-ops at runtime; analysis tools
   (Valgrind/ThreadSanitizer/Helgrind/DRD) intercept them by name.
   Fix: AnnotateIgnoreSyncBegin/AnnotateIgnoreSyncEnd were previously missing
   from this list even though ANNOTATE_IGNORE_SYNC_BEGIN/END expand to calls
   to them, which broke C++ builds with DYNAMIC_ANNOTATIONS_ENABLED != 0. */
#ifdef __cplusplus
extern "C" {
#endif
void AnnotateRWLockCreate(const char *file, int line,
                          const volatile void *lock);
void AnnotateRWLockDestroy(const char *file, int line,
                           const volatile void *lock);
void AnnotateRWLockAcquired(const char *file, int line,
                            const volatile void *lock, long is_w);
void AnnotateRWLockReleased(const char *file, int line,
                            const volatile void *lock, long is_w);
void AnnotateBarrierInit(const char *file, int line,
                         const volatile void *barrier, long count,
                         long reinitialization_allowed);
void AnnotateBarrierWaitBefore(const char *file, int line,
                               const volatile void *barrier);
void AnnotateBarrierWaitAfter(const char *file, int line,
                              const volatile void *barrier);
void AnnotateBarrierDestroy(const char *file, int line,
                            const volatile void *barrier);
void AnnotateCondVarWait(const char *file, int line,
                         const volatile void *cv,
                         const volatile void *lock);
void AnnotateCondVarSignal(const char *file, int line,
                           const volatile void *cv);
void AnnotateCondVarSignalAll(const char *file, int line,
                              const volatile void *cv);
void AnnotatePublishMemoryRange(const char *file, int line,
                                const volatile void *address,
                                long size);
void AnnotateUnpublishMemoryRange(const char *file, int line,
                                  const volatile void *address,
                                  long size);
void AnnotatePCQCreate(const char *file, int line,
                       const volatile void *pcq);
void AnnotatePCQDestroy(const char *file, int line,
                        const volatile void *pcq);
void AnnotatePCQPut(const char *file, int line,
                    const volatile void *pcq);
void AnnotatePCQGet(const char *file, int line,
                    const volatile void *pcq);
void AnnotateNewMemory(const char *file, int line,
                       const volatile void *address,
                       long size);
void AnnotateExpectRace(const char *file, int line,
                        const volatile void *address,
                        const char *description);
void AnnotateBenignRace(const char *file, int line,
                        const volatile void *address,
                        const char *description);
void AnnotateBenignRaceSized(const char *file, int line,
                             const volatile void *address,
                             long size,
                             const char *description);
void AnnotateMutexIsUsedAsCondVar(const char *file, int line,
                                  const volatile void *mu);
void AnnotateTraceMemory(const char *file, int line,
                         const volatile void *arg);
void AnnotateThreadName(const char *file, int line,
                        const char *name);
void AnnotateIgnoreReadsBegin(const char *file, int line);
void AnnotateIgnoreReadsEnd(const char *file, int line);
void AnnotateIgnoreWritesBegin(const char *file, int line);
void AnnotateIgnoreWritesEnd(const char *file, int line);
void AnnotateIgnoreSyncBegin(const char *file, int line);
void AnnotateIgnoreSyncEnd(const char *file, int line);
void AnnotateEnableRaceDetection(const char *file, int line, int enable);
void AnnotateNoOp(const char *file, int line,
                  const volatile void *arg);
void AnnotateFlushState(const char *file, int line);

/* Return non-zero value if running under valgrind.

   If "valgrind.h" is included into dynamic_annotations.c,
   the regular valgrind mechanism will be used.
   See http://valgrind.org/docs/manual/manual-core-adv.html about
   RUNNING_ON_VALGRIND and other valgrind "client requests".
   The file "valgrind.h" may be obtained by doing
   svn co svn://svn.valgrind.org/valgrind/trunk/include

   If for some reason you can't use "valgrind.h" or want to fake valgrind,
   there are two ways to make this function return non-zero:
   - Use environment variable: export RUNNING_ON_VALGRIND=1
   - Make your tool intercept the function RunningOnValgrind() and
     change its return value.
 */
int RunningOnValgrind(void);

#ifdef __cplusplus
}
#endif
| 473 |
| 474 #if DYNAMIC_ANNOTATIONS_ENABLED != 0 && defined(__cplusplus) |
| 475 |
| 476 /* ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads. |
| 477 |
| 478 Instead of doing |
| 479 ANNOTATE_IGNORE_READS_BEGIN(); |
| 480 ... = x; |
| 481 ANNOTATE_IGNORE_READS_END(); |
| 482 one can use |
| 483 ... = ANNOTATE_UNPROTECTED_READ(x); */ |
| 484 template <class T> |
| 485 inline T ANNOTATE_UNPROTECTED_READ(const volatile T &x) { |
| 486 ANNOTATE_IGNORE_READS_BEGIN(); |
| 487 T res = x; |
| 488 ANNOTATE_IGNORE_READS_END(); |
| 489 return res; |
| 490 } |
/* Apply ANNOTATE_BENIGN_RACE_SIZED to a static variable.
   Expands to a class in an unnamed namespace whose constructor annotates
   "static_var" (prefixing the description with the variable's name via the
   # stringizing operator), plus one static instance so the annotation runs
   during static initialization. Intended for use at namespace scope,
   presumably in a .cc file -- an unnamed namespace in a header would create
   one annotator per translation unit (TODO confirm intended usage). */
#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description)        \
  namespace {                                                       \
    class static_var ## _annotator {                                \
     public:                                                        \
      static_var ## _annotator() {                                  \
        ANNOTATE_BENIGN_RACE_SIZED(&static_var,                     \
                                   sizeof(static_var),              \
                                   # static_var ": " description);  \
      }                                                             \
    };                                                              \
    static static_var ## _annotator the ## static_var ## _annotator;\
  }
#else /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */

/* With annotations disabled (or in C), an unprotected read is just the read
   itself and the static benign-race annotation expands to nothing. */
#define ANNOTATE_UNPROTECTED_READ(x) (x)
#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) /* empty */

#endif /* DYNAMIC_ANNOTATIONS_ENABLED */
| 510 |
| 511 #endif /* __DYNAMIC_ANNOTATIONS_H__ */ |
OLD | NEW |