| OLD | NEW |
| (Empty) |
| 1 /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ | |
| 2 /* This Source Code Form is subject to the terms of the Mozilla Public | |
| 3 * License, v. 2.0. If a copy of the MPL was not distributed with this | |
| 4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ | |
| 5 | |
| 6 /* | |
| 7 ** Thread safe versions of malloc, free, realloc, calloc and cfree. | |
| 8 */ | |
| 9 | |
| 10 #include "primpl.h" | |
| 11 | |
| 12 #ifdef _PR_ZONE_ALLOCATOR | |
| 13 | |
| 14 /* | |
| 15 ** The zone allocator code must use native mutexes and cannot | |
| 16 ** use PRLocks because PR_NewLock calls PR_Calloc, resulting | |
| 17 ** in cyclic dependency of initialization. | |
| 18 */ | |
| 19 | |
| 20 #include <string.h> | |
| 21 | |
| 22 union memBlkHdrUn; | |
| 23 | |
/*
** One free-list bucket.  All blocks on a given list share the same
** payload size (blockSize).  There is one MemoryZone per
** (size class, thread pool) pair; see zones[][] below.
*/
typedef struct MemoryZoneStr {
    union memBlkHdrUn    *head;         /* free list */
    pthread_mutex_t      lock;          /* protects head/elements; a native
                                         * mutex, not a PRLock (see comment
                                         * at the top of this file) */
    size_t               blockSize;     /* size of blocks on this free list */
    PRUint32             locked;        /* current state of lock */
    PRUint32             contention;    /* counter: had to wait for lock */
    PRUint32             hits;          /* allocated from free list */
    PRUint32             misses;        /* had to call malloc */
    PRUint32             elements;      /* on free list */
} MemoryZone;
| 34 | |
/*
** Header placed immediately before every allocation, and duplicated
** immediately after the payload.  The trailing copy is asserted against
** the leading one on free/realloc, catching buffer overruns.  The union
** pads the header to a fixed 48 bytes so payload alignment does not
** depend on the struct layout.
*/
typedef union memBlkHdrUn {
    unsigned char filler[48];   /* fix the size of this beast */
    struct memBlkHdrStr {
        union memBlkHdrUn *next;        /* next block on the free list */
        MemoryZone        *zone;        /* owning zone; NULL for oversize blocks */
        size_t            blockSize;    /* usable payload size */
        size_t            requestedSize;/* size the caller actually asked for */
        PRUint32          magic;        /* ZONE_MAGIC; detects foreign pointers */
    } s;
} MemBlockHdr;
| 45 | |
| 46 #define MEM_ZONES 7 | |
| 47 #define THREAD_POOLS 11 /* prime number for modulus */ | |
| 48 #define ZONE_MAGIC 0x0BADC0DE | |
| 49 | |
| 50 static MemoryZone zones[MEM_ZONES][THREAD_POOLS]; | |
| 51 | |
| 52 static PRBool use_zone_allocator = PR_FALSE; | |
| 53 | |
| 54 static void pr_ZoneFree(void *ptr); | |
| 55 | |
| 56 void | |
| 57 _PR_DestroyZones(void) | |
| 58 { | |
| 59 int i, j; | |
| 60 | |
| 61 if (!use_zone_allocator) | |
| 62 return; | |
| 63 | |
| 64 for (j = 0; j < THREAD_POOLS; j++) { | |
| 65 for (i = 0; i < MEM_ZONES; i++) { | |
| 66 MemoryZone *mz = &zones[i][j]; | |
| 67 pthread_mutex_destroy(&mz->lock); | |
| 68 while (mz->head) { | |
| 69 MemBlockHdr *hdr = mz->head; | |
| 70 mz->head = hdr->s.next; /* unlink it */ | |
| 71 free(hdr); | |
| 72 mz->elements--; | |
| 73 } | |
| 74 } | |
| 75 } | |
| 76 use_zone_allocator = PR_FALSE; | |
| 77 } | |
| 78 | |
| 79 /* | |
| 80 ** pr_FindSymbolInProg | |
| 81 ** | |
| 82 ** Find the specified data symbol in the program and return | |
| 83 ** its address. | |
| 84 */ | |
| 85 | |
#ifdef HAVE_DLL

#if defined(USE_DLFCN) && !defined(NO_DLOPEN_NULL)

#include <dlfcn.h>

/*
** dlfcn variant: dlopen(0) yields a handle for the main program, so a
** symbol lookup there finds symbols defined by the executable itself.
*/
static void *
pr_FindSymbolInProg(const char *name)
{
    void *h;
    void *sym;

    h = dlopen(0, RTLD_LAZY);
    if (h == NULL)
        return NULL;
    sym = dlsym(h, name);
    /* NOTE(review): the handle is closed before the symbol is used by
     * the caller; this presumably relies on the main program never
     * being unloaded — confirm on each supported platform. */
    (void)dlclose(h);
    return sym;
}

#elif defined(USE_HPSHL)

#include <dl.h>

/* HP-UX shl_load variant: a NULL handle searches all loaded modules. */
static void *
pr_FindSymbolInProg(const char *name)
{
    shl_t h = NULL;
    void *sym;

    if (shl_findsym(&h, name, TYPE_DATA, &sym) == -1)
        return NULL;
    return sym;
}

#elif defined(USE_MACH_DYLD) || defined(NO_DLOPEN_NULL)

/* Stub: lookup in the main program is not available here. */
static void *
pr_FindSymbolInProg(const char *name)
{
    /* FIXME: not implemented */
    return NULL;
}

#else

#error "The zone allocator is not supported on this platform"

#endif

#else /* !defined(HAVE_DLL) */

/* No dynamic linking at all: the lookup cannot be performed. */
static void *
pr_FindSymbolInProg(const char *name)
{
    /* can't be implemented */
    return NULL;
}

#endif /* HAVE_DLL */
| 146 | |
| 147 void | |
| 148 _PR_InitZones(void) | |
| 149 { | |
| 150 int i, j; | |
| 151 char *envp; | |
| 152 PRBool *sym; | |
| 153 | |
| 154 if ((sym = (PRBool *)pr_FindSymbolInProg("nspr_use_zone_allocator")) != NULL
) { | |
| 155 use_zone_allocator = *sym; | |
| 156 } else if ((envp = getenv("NSPR_USE_ZONE_ALLOCATOR")) != NULL) { | |
| 157 use_zone_allocator = (atoi(envp) == 1); | |
| 158 } | |
| 159 | |
| 160 if (!use_zone_allocator) | |
| 161 return; | |
| 162 | |
| 163 for (j = 0; j < THREAD_POOLS; j++) { | |
| 164 for (i = 0; i < MEM_ZONES; i++) { | |
| 165 MemoryZone *mz = &zones[i][j]; | |
| 166 int rv = pthread_mutex_init(&mz->lock, NULL); | |
| 167 PR_ASSERT(0 == rv); | |
| 168 if (rv != 0) { | |
| 169 goto loser; | |
| 170 } | |
| 171 mz->blockSize = 16 << ( 2 * i); | |
| 172 } | |
| 173 } | |
| 174 return; | |
| 175 | |
| 176 loser: | |
| 177 _PR_DestroyZones(); | |
| 178 return; | |
| 179 } | |
| 180 | |
| 181 PR_IMPLEMENT(void) | |
| 182 PR_FPrintZoneStats(PRFileDesc *debug_out) | |
| 183 { | |
| 184 int i, j; | |
| 185 | |
| 186 for (j = 0; j < THREAD_POOLS; j++) { | |
| 187 for (i = 0; i < MEM_ZONES; i++) { | |
| 188 MemoryZone *mz = &zones[i][j]; | |
| 189 MemoryZone zone = *mz; | |
| 190 if (zone.elements || zone.misses || zone.hits) { | |
| 191 PR_fprintf(debug_out, | |
| 192 "pool: %d, zone: %d, size: %d, free: %d, hit: %d, miss: %d, contend: %d\n", | |
| 193 j, i, zone.blockSize, zone.elements, | |
| 194 zone.hits, zone.misses, zone.contention); | |
| 195 } | |
| 196 } | |
| 197 } | |
| 198 } | |
| 199 | |
/*
** Allocate 'size' bytes.  Requests that fit a size class (16 << 2*zone,
** up to 64K) are served from the per-(zone, thread-pool) free list,
** falling back to malloc on a miss.  Larger requests get a standalone
** malloc'ed block with zone == NULL.  Every block is bracketed by two
** identical MemBlockHdrs (one before and one after the payload) so that
** overruns can be detected when the block is freed or realloc'ed.
** Returns NULL with PR_OUT_OF_MEMORY_ERROR set on failure.
*/
static void *
pr_ZoneMalloc(PRUint32 size)
{
    void *rv;
    unsigned int zone;
    size_t blockSize;
    MemBlockHdr *mb, *mt;
    MemoryZone *mz;

    /* Always allocate a non-zero amount of bytes */
    if (size < 1) {
        size = 1;
    }
    /* Pick the smallest size class that fits the request. */
    for (zone = 0, blockSize = 16; zone < MEM_ZONES; ++zone, blockSize <<= 2) {
        if (size <= blockSize) {
            break;
        }
    }
    if (zone < MEM_ZONES) {
        /* Hash the calling thread to a pool to reduce lock contention. */
        pthread_t me = pthread_self();
        unsigned int pool = (PRUptrdiff)me % THREAD_POOLS;
        PRUint32 wasLocked;
        mz = &zones[zone][pool];
        /* 'locked' is read before locking purely as a contention
         * heuristic; it is not used for synchronization. */
        wasLocked = mz->locked;
        pthread_mutex_lock(&mz->lock);
        mz->locked = 1;
        if (wasLocked)
            mz->contention++;
        if (mz->head) {
            /* Free-list hit: pop the head block after sanity checks. */
            mb = mz->head;
            PR_ASSERT(mb->s.magic == ZONE_MAGIC);
            PR_ASSERT(mb->s.zone == mz);
            PR_ASSERT(mb->s.blockSize == blockSize);
            PR_ASSERT(mz->blockSize == blockSize);

            /* Verify the trailing header copy as well. */
            mt = (MemBlockHdr *)(((char *)(mb + 1)) + blockSize);
            PR_ASSERT(mt->s.magic == ZONE_MAGIC);
            PR_ASSERT(mt->s.zone == mz);
            PR_ASSERT(mt->s.blockSize == blockSize);

            mz->hits++;
            mz->elements--;
            mz->head = mb->s.next;      /* take off free list */
            mz->locked = 0;
            pthread_mutex_unlock(&mz->lock);

            mt->s.next = mb->s.next = NULL;
            mt->s.requestedSize = mb->s.requestedSize = size;

            rv = (void *)(mb + 1);
            return rv;
        }

        /* Free-list miss: drop the lock before calling malloc. */
        mz->misses++;
        mz->locked = 0;
        pthread_mutex_unlock(&mz->lock);

        /* Room for the payload plus leading and trailing headers. */
        mb = (MemBlockHdr *)malloc(blockSize + 2 * (sizeof *mb));
        if (!mb) {
            PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
            return NULL;
        }
        mb->s.next = NULL;
        mb->s.zone = mz;
        mb->s.magic = ZONE_MAGIC;
        mb->s.blockSize = blockSize;
        mb->s.requestedSize = size;

        /* Duplicate the header after the payload (overrun redzone). */
        mt = (MemBlockHdr *)(((char *)(mb + 1)) + blockSize);
        memcpy(mt, mb, sizeof *mb);

        rv = (void *)(mb + 1);
        return rv;
    }

    /* size was too big.  Create a block with no zone */
    /* Round up to a multiple of 16.  NOTE(review): for sizes near the
     * top of the range, size + padding + headers could overflow; the
     * callers presumably never pass such sizes — worth confirming. */
    blockSize = (size & 15) ? size + 16 - (size & 15) : size;
    mb = (MemBlockHdr *)malloc(blockSize + 2 * (sizeof *mb));
    if (!mb) {
        PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
        return NULL;
    }
    mb->s.next = NULL;
    mb->s.zone = NULL;          /* marks the block as oversize/zone-less */
    mb->s.magic = ZONE_MAGIC;
    mb->s.blockSize = blockSize;
    mb->s.requestedSize = size;

    mt = (MemBlockHdr *)(((char *)(mb + 1)) + blockSize);
    memcpy(mt, mb, sizeof *mb);

    rv = (void *)(mb + 1);
    return rv;
}
| 294 | |
| 295 | |
| 296 static void * | |
| 297 pr_ZoneCalloc(PRUint32 nelem, PRUint32 elsize) | |
| 298 { | |
| 299 PRUint32 size = nelem * elsize; | |
| 300 void *p = pr_ZoneMalloc(size); | |
| 301 if (p) { | |
| 302 memset(p, 0, size); | |
| 303 } | |
| 304 return p; | |
| 305 } | |
| 306 | |
/*
** Zone-allocator realloc.  Three cases:
**   - oldptr == NULL: plain pr_ZoneMalloc.
**   - oldptr has no ZONE_MAGIC header: it came from ordinary malloc;
**     realloc it in place and copy into a fresh zone block.
**   - oldptr is ours: reuse the block if it is big enough, otherwise
**     allocate a new zone block and copy the old contents over.
*/
static void *
pr_ZoneRealloc(void *oldptr, PRUint32 bytes)
{
    void *rv;
    MemBlockHdr *mb;
    int ours;       /* nonzero iff oldptr is a zone-allocated block */
    MemBlockHdr phony;  /* stand-in header for foreign (malloc'ed) blocks */

    if (!oldptr)
        return pr_ZoneMalloc(bytes);
    mb = (MemBlockHdr *)((char *)oldptr - (sizeof *mb));
    if (mb->s.magic != ZONE_MAGIC) {
        /* Maybe this just came from ordinary malloc */
#ifdef DEBUG
        fprintf(stderr,
                "Warning: reallocing memory block %p from ordinary malloc\n",
                oldptr);
#endif
        /*
         * We are going to realloc oldptr.  If realloc succeeds, the
         * original value of oldptr will point to freed memory.  So this
         * function must not fail after a successful realloc call.  We
         * must perform any operation that may fail before the realloc
         * call.
         */
        rv = pr_ZoneMalloc(bytes);  /* this may fail */
        if (!rv) {
            return rv;
        }

        /* We don't know how big it is.  But we can fix that. */
        oldptr = realloc(oldptr, bytes);
        /*
         * If realloc returns NULL, this function loses the original
         * value of oldptr.  This isn't a leak because the caller of
         * this function still has the original value of oldptr.
         */
        if (!oldptr) {
            if (bytes) {
                PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
                pr_ZoneFree(rv);
                return oldptr;
            }
            /* bytes == 0: fall through and return the zone block. */
        }
        /* Fake a header so the common copy path below can read
         * requestedSize uniformly. */
        phony.s.requestedSize = bytes;
        mb = &phony;
        ours = 0;
    } else {
        size_t blockSize = mb->s.blockSize;
        /* Verify the trailing header copy (overrun redzone). */
        MemBlockHdr *mt = (MemBlockHdr *)(((char *)(mb + 1)) + blockSize);

        PR_ASSERT(mt->s.magic == ZONE_MAGIC);
        PR_ASSERT(mt->s.zone == mb->s.zone);
        PR_ASSERT(mt->s.blockSize == blockSize);

        if (bytes <= blockSize) {
            /* The block is already big enough. */
            mt->s.requestedSize = mb->s.requestedSize = bytes;
            return oldptr;
        }
        ours = 1;
        rv = pr_ZoneMalloc(bytes);
        if (!rv) {
            return rv;
        }
    }

    /* Copy the old contents (bounded by the original requested size),
     * then release the old block through the appropriate deallocator. */
    if (oldptr && mb->s.requestedSize)
        memcpy(rv, oldptr, mb->s.requestedSize);
    if (ours)
        pr_ZoneFree(oldptr);
    else if (oldptr)
        free(oldptr);
    return rv;
}
| 382 | |
/*
** Return a block to its zone's free list.  Oversize blocks
** (zone == NULL) and foreign pointers lacking ZONE_MAGIC are handed to
** free() instead.  free(NULL) semantics: a NULL ptr is a no-op.
*/
static void
pr_ZoneFree(void *ptr)
{
    MemBlockHdr *mb, *mt;
    MemoryZone *mz;
    size_t blockSize;
    PRUint32 wasLocked;

    if (!ptr)
        return;

    mb = (MemBlockHdr *)((char *)ptr - (sizeof *mb));

    if (mb->s.magic != ZONE_MAGIC) {
        /* maybe this came from ordinary malloc */
#ifdef DEBUG
        fprintf(stderr,
                "Warning: freeing memory block %p from ordinary malloc\n", ptr);
#endif
        free(ptr);
        return;
    }

    blockSize = mb->s.blockSize;
    mz = mb->s.zone;
    /* Verify the trailing header copy to catch buffer overruns. */
    mt = (MemBlockHdr *)(((char *)(mb + 1)) + blockSize);
    PR_ASSERT(mt->s.magic == ZONE_MAGIC);
    PR_ASSERT(mt->s.zone == mz);
    PR_ASSERT(mt->s.blockSize == blockSize);
    if (!mz) {
        PR_ASSERT(blockSize > 65536);
        /* This block was not in any zone.  Just free it. */
        free(mb);
        return;
    }
    PR_ASSERT(mz->blockSize == blockSize);
    /* 'locked' is read before locking purely as a contention counter
     * heuristic; it is not used for synchronization. */
    wasLocked = mz->locked;
    pthread_mutex_lock(&mz->lock);
    mz->locked = 1;
    if (wasLocked)
        mz->contention++;
    mt->s.next = mb->s.next = mz->head;     /* put on head of list */
    mz->head = mb;
    mz->elements++;
    mz->locked = 0;
    pthread_mutex_unlock(&mz->lock);
}
| 430 | |
| 431 PR_IMPLEMENT(void *) PR_Malloc(PRUint32 size) | |
| 432 { | |
| 433 if (!_pr_initialized) _PR_ImplicitInitialization(); | |
| 434 | |
| 435 return use_zone_allocator ? pr_ZoneMalloc(size) : malloc(size); | |
| 436 } | |
| 437 | |
| 438 PR_IMPLEMENT(void *) PR_Calloc(PRUint32 nelem, PRUint32 elsize) | |
| 439 { | |
| 440 if (!_pr_initialized) _PR_ImplicitInitialization(); | |
| 441 | |
| 442 return use_zone_allocator ? | |
| 443 pr_ZoneCalloc(nelem, elsize) : calloc(nelem, elsize); | |
| 444 } | |
| 445 | |
| 446 PR_IMPLEMENT(void *) PR_Realloc(void *ptr, PRUint32 size) | |
| 447 { | |
| 448 if (!_pr_initialized) _PR_ImplicitInitialization(); | |
| 449 | |
| 450 return use_zone_allocator ? pr_ZoneRealloc(ptr, size) : realloc(ptr, size); | |
| 451 } | |
| 452 | |
| 453 PR_IMPLEMENT(void) PR_Free(void *ptr) | |
| 454 { | |
| 455 if (use_zone_allocator) | |
| 456 pr_ZoneFree(ptr); | |
| 457 else | |
| 458 free(ptr); | |
| 459 } | |
| 460 | |
| 461 #else /* !defined(_PR_ZONE_ALLOCATOR) */ | |
| 462 | |
| 463 /* | |
| 464 ** The PR_Malloc, PR_Calloc, PR_Realloc, and PR_Free functions simply | |
| 465 ** call their libc equivalents now. This may seem redundant, but it | |
| 466 ** ensures that we are calling into the same runtime library. On | |
| 467 ** Win32, it is possible to have multiple runtime libraries (e.g., | |
| 468 ** objects compiled with /MD and /MDd) in the same process, and | |
| 469 ** they maintain separate heaps, which cannot be mixed. | |
| 470 */ | |
/* Non-zone build: forward to libc (or the WIN16 MD shim) so that all
** NSPR allocations come from the same runtime heap. */
PR_IMPLEMENT(void *) PR_Malloc(PRUint32 size)
{
#if defined (WIN16)
    return PR_MD_malloc( (size_t) size);
#else
    return malloc(size);
#endif
}
| 479 | |
/* Non-zone build: forward to libc calloc (or the WIN16 MD shim). */
PR_IMPLEMENT(void *) PR_Calloc(PRUint32 nelem, PRUint32 elsize)
{
#if defined (WIN16)
    return PR_MD_calloc( (size_t)nelem, (size_t)elsize );

#else
    return calloc(nelem, elsize);
#endif
}
| 489 | |
/* Non-zone build: forward to libc realloc (or the WIN16 MD shim). */
PR_IMPLEMENT(void *) PR_Realloc(void *ptr, PRUint32 size)
{
#if defined (WIN16)
    return PR_MD_realloc( ptr, (size_t) size);
#else
    return realloc(ptr, size);
#endif
}
| 498 | |
/* Non-zone build: forward to libc free (or the WIN16 MD shim). */
PR_IMPLEMENT(void) PR_Free(void *ptr)
{
#if defined (WIN16)
    PR_MD_free( ptr );
#else
    free(ptr);
#endif
}
| 507 | |
| 508 #endif /* _PR_ZONE_ALLOCATOR */ | |
| 509 | |
| 510 /* | |
| 511 ** Complexity alert! | |
| 512 ** | |
| 513 ** If malloc/calloc/free (etc.) were implemented to use pr lock's then | |
| 514 ** the entry points could block when called if some other thread had the | |
| 515 ** lock. | |
| 516 ** | |
| 517 ** Most of the time this isn't a problem. However, in the case that we | |
| 518 ** are using the thread safe malloc code after PR_Init but before | |
| 519 ** PR_AttachThread has been called (on a native thread that nspr has yet | |
| 520 ** to be told about) we could get royally screwed if the lock was busy | |
| 521 ** and we tried to context switch the thread away. In this scenario | |
| 522 ** PR_CURRENT_THREAD() == NULL | |
| 523 ** | |
| 524 ** To avoid this unfortunate case, we use the low level locking | |
| 525 ** facilities for malloc protection instead of the slightly higher level | |
| 526 ** locking. This makes malloc somewhat faster so maybe it's a good thing | |
| 527 ** anyway. | |
| 528 */ | |
| 529 #ifdef _PR_OVERRIDE_MALLOC | |
| 530 | |
| 531 /* Imports */ | |
| 532 extern void *_PR_UnlockedMalloc(size_t size); | |
| 533 extern void *_PR_UnlockedMemalign(size_t alignment, size_t size); | |
| 534 extern void _PR_UnlockedFree(void *ptr); | |
| 535 extern void *_PR_UnlockedRealloc(void *ptr, size_t size); | |
| 536 extern void *_PR_UnlockedCalloc(size_t n, size_t elsize); | |
| 537 | |
/* Set by _PR_MallocInit(); until then the lock macros are no-ops so the
** override allocator works before NSPR is initialized. */
static PRBool _PR_malloc_initialised = PR_FALSE;

#ifdef _PR_PTHREADS
static pthread_mutex_t _PR_MD_malloc_crustylock;

/*
** WARNING: _PR_Lock_Malloc opens braces that _PR_Unlock_Malloc closes.
** The two macros are deliberately unbalanced and must always be used as
** a matched pair within a single function body.
*/
#define _PR_Lock_Malloc() { \
                if(PR_TRUE == _PR_malloc_initialised) { \
                PRStatus rv; \
                rv = pthread_mutex_lock(&_PR_MD_malloc_crustylock); \
                PR_ASSERT(0 == rv); \
                }

#define _PR_Unlock_Malloc() if(PR_TRUE == _PR_malloc_initialised) { \
                PRStatus rv; \
                rv = pthread_mutex_unlock(&_PR_MD_malloc_crustylock); \
                PR_ASSERT(0 == rv); \
                } \
            }
#else /* _PR_PTHREADS */
static _MDLock _PR_MD_malloc_crustylock;

#ifdef IRIX
/*
** IRIX variant: in addition to taking the MD lock, software interrupts
** are turned off around the critical section when the caller is a
** local (non-native) NSPR thread, so it cannot be context-switched
** away while holding the malloc lock.  Same unbalanced-brace pairing
** as above: _is is declared by the Lock macro and used by Unlock.
*/
#define _PR_Lock_Malloc() { \
                PRIntn _is; \
                if(PR_TRUE == _PR_malloc_initialised) { \
                if (_PR_MD_GET_ATTACHED_THREAD() && \
                    !_PR_IS_NATIVE_THREAD( \
                        _PR_MD_GET_ATTACHED_THREAD())) \
                    _PR_INTSOFF(_is); \
                _PR_MD_LOCK(&_PR_MD_malloc_crustylock); \
                }

#define _PR_Unlock_Malloc() if(PR_TRUE == _PR_malloc_initialised) { \
                _PR_MD_UNLOCK(&_PR_MD_malloc_crustylock); \
                if (_PR_MD_GET_ATTACHED_THREAD() && \
                    !_PR_IS_NATIVE_THREAD( \
                        _PR_MD_GET_ATTACHED_THREAD())) \
                    _PR_INTSON(_is); \
                } \
            }
#else /* IRIX */
/* Generic classic-NSPR variant: uses _PR_MD_CURRENT_THREAD() instead of
** the IRIX attached-thread accessor; otherwise identical pairing. */
#define _PR_Lock_Malloc() { \
                PRIntn _is; \
                if(PR_TRUE == _PR_malloc_initialised) { \
                if (_PR_MD_CURRENT_THREAD() && \
                    !_PR_IS_NATIVE_THREAD( \
                        _PR_MD_CURRENT_THREAD())) \
                    _PR_INTSOFF(_is); \
                _PR_MD_LOCK(&_PR_MD_malloc_crustylock); \
                }

#define _PR_Unlock_Malloc() if(PR_TRUE == _PR_malloc_initialised) { \
                _PR_MD_UNLOCK(&_PR_MD_malloc_crustylock); \
                if (_PR_MD_CURRENT_THREAD() && \
                    !_PR_IS_NATIVE_THREAD( \
                        _PR_MD_CURRENT_THREAD())) \
                    _PR_INTSON(_is); \
                } \
            }
#endif /* IRIX */
#endif /* _PR_PTHREADS */
| 599 | |
/*
** Initialize the lock guarding the overridden malloc/free entry points.
** Idempotent: returns PR_SUCCESS immediately on repeat calls.
*/
PR_IMPLEMENT(PRStatus) _PR_MallocInit(void)
{
    PRStatus rv = PR_SUCCESS;

    if( PR_TRUE == _PR_malloc_initialised ) return PR_SUCCESS;

#ifdef _PR_PTHREADS
    {
        int status;
        pthread_mutexattr_t mattr;

        status = _PT_PTHREAD_MUTEXATTR_INIT(&mattr);
        PR_ASSERT(0 == status);
        status = _PT_PTHREAD_MUTEX_INIT(_PR_MD_malloc_crustylock, mattr);
        PR_ASSERT(0 == status);
        status = _PT_PTHREAD_MUTEXATTR_DESTROY(&mattr);
        PR_ASSERT(0 == status);
    }
#else /* _PR_PTHREADS */
    _MD_NEW_LOCK(&_PR_MD_malloc_crustylock);
#endif /* _PR_PTHREADS */

    /* rv is never set to failure above; the check is kept for the day a
     * fallible initialization path is added. */
    if( PR_SUCCESS == rv )
    {
        _PR_malloc_initialised = PR_TRUE;
    }

    return rv;
}
| 629 | |
/* Overridden malloc: serialize through the crusty lock, then delegate
** to the unlocked implementation. */
void *malloc(size_t size)
{
    void *p;
    _PR_Lock_Malloc();
    p = _PR_UnlockedMalloc(size);
    _PR_Unlock_Malloc();
    return p;
}
| 638 | |
#if defined(IRIX)
/* Overridden memalign (IRIX only): locked wrapper around the unlocked
** aligned allocator. */
void *memalign(size_t alignment, size_t size)
{
    void *p;
    _PR_Lock_Malloc();
    p = _PR_UnlockedMemalign(alignment, size);
    _PR_Unlock_Malloc();
    return p;
}

/* valloc returns page-aligned memory; implemented via memalign using
** the system page size. */
void *valloc(size_t size)
{
    return(memalign(sysconf(_SC_PAGESIZE),size));
}
#endif /* IRIX */
| 654 | |
/* Overridden free: serialize through the crusty lock, then delegate to
** the unlocked implementation. */
void free(void *ptr)
{
    _PR_Lock_Malloc();
    _PR_UnlockedFree(ptr);
    _PR_Unlock_Malloc();
}
| 661 | |
/* Overridden realloc: serialize through the crusty lock, then delegate
** to the unlocked implementation. */
void *realloc(void *ptr, size_t size)
{
    void *p;
    _PR_Lock_Malloc();
    p = _PR_UnlockedRealloc(ptr, size);
    _PR_Unlock_Malloc();
    return p;
}
| 670 | |
/* Overridden calloc: serialize through the crusty lock, then delegate
** to the unlocked implementation.  NOTE(review): n * elsize overflow
** handling is presumably _PR_UnlockedCalloc's responsibility — confirm
** in its definition. */
void *calloc(size_t n, size_t elsize)
{
    void *p;
    _PR_Lock_Malloc();
    p = _PR_UnlockedCalloc(n, elsize);
    _PR_Unlock_Malloc();
    return p;
}
| 679 | |
/* Legacy cfree (historical companion to calloc): identical to the
** overridden free. */
void cfree(void *p)
{
    _PR_Lock_Malloc();
    _PR_UnlockedFree(p);
    _PR_Unlock_Malloc();
}
| 686 | |
/* Called during NSPR startup: set up the override-malloc lock.
** Initialization failure is fatal only in debug builds (assert); rv is
** otherwise unused when asserts compile out. */
void _PR_InitMem(void)
{
    PRStatus rv;
    rv = _PR_MallocInit();
    PR_ASSERT(PR_SUCCESS == rv);
}
| 693 | |
| 694 #endif /* _PR_OVERRIDE_MALLOC */ | |
| OLD | NEW |