| Index: nss/lib/base/arena.c
|
| diff --git a/nss/lib/base/arena.c b/nss/lib/base/arena.c
|
| index 2b83338ad12b3d4517ece39b78bac1080a29818e..b8e64643ddde230beff9cab63824fbd3d444ddd8 100644
|
| --- a/nss/lib/base/arena.c
|
| +++ b/nss/lib/base/arena.c
|
| @@ -41,7 +41,7 @@
|
| * nssArena_Mark
|
| * nssArena_Release
|
| * nssArena_Unmark
|
| - *
|
| + *
|
| * nss_ZAlloc
|
| * nss_ZFreeIf
|
| * nss_ZRealloc
|
| @@ -54,16 +54,16 @@
|
| */
|
|
|
| struct NSSArenaStr {
|
| - PLArenaPool pool;
|
| - PRLock *lock;
|
| + PLArenaPool pool;
|
| + PRLock *lock;
|
| #ifdef ARENA_THREADMARK
|
| - PRThread *marking_thread;
|
| - nssArenaMark *first_mark;
|
| - nssArenaMark *last_mark;
|
| + PRThread *marking_thread;
|
| + nssArenaMark *first_mark;
|
| + nssArenaMark *last_mark;
|
| #endif /* ARENA_THREADMARK */
|
| #ifdef ARENA_DESTRUCTOR_LIST
|
| - struct arena_destructor_node *first_destructor;
|
| - struct arena_destructor_node *last_destructor;
|
| + struct arena_destructor_node *first_destructor;
|
| + struct arena_destructor_node *last_destructor;
|
| #endif /* ARENA_DESTRUCTOR_LIST */
|
| };
|
|
|
| @@ -74,14 +74,14 @@ struct NSSArenaStr {
|
| */
|
|
|
| struct nssArenaMarkStr {
|
| - PRUint32 magic;
|
| - void *mark;
|
| + PRUint32 magic;
|
| + void *mark;
|
| #ifdef ARENA_THREADMARK
|
| - nssArenaMark *next;
|
| + nssArenaMark *next;
|
| #endif /* ARENA_THREADMARK */
|
| #ifdef ARENA_DESTRUCTOR_LIST
|
| - struct arena_destructor_node *next_destructor;
|
| - struct arena_destructor_node *prev_destructor;
|
| + struct arena_destructor_node *next_destructor;
|
| + struct arena_destructor_node *prev_destructor;
|
| #endif /* ARENA_DESTRUCTOR_LIST */
|
| };
|
|
|
| @@ -96,45 +96,39 @@ extern const NSSError NSS_ERROR_INTERNAL_ERROR;
|
| static nssPointerTracker arena_pointer_tracker;
|
|
|
| static PRStatus
|
| -arena_add_pointer
|
| -(
|
| - const NSSArena *arena
|
| -)
|
| +arena_add_pointer(const NSSArena *arena)
|
| {
|
| - PRStatus rv;
|
| -
|
| - rv = nssPointerTracker_initialize(&arena_pointer_tracker);
|
| - if( PR_SUCCESS != rv ) {
|
| - return rv;
|
| - }
|
| + PRStatus rv;
|
|
|
| - rv = nssPointerTracker_add(&arena_pointer_tracker, arena);
|
| - if( PR_SUCCESS != rv ) {
|
| - NSSError e = NSS_GetError();
|
| - if( NSS_ERROR_NO_MEMORY != e ) {
|
| - nss_SetError(NSS_ERROR_INTERNAL_ERROR);
|
| + rv = nssPointerTracker_initialize(&arena_pointer_tracker);
|
| + if (PR_SUCCESS != rv) {
|
| + return rv;
|
| }
|
|
|
| - return rv;
|
| - }
|
| + rv = nssPointerTracker_add(&arena_pointer_tracker, arena);
|
| + if (PR_SUCCESS != rv) {
|
| + NSSError e = NSS_GetError();
|
| + if (NSS_ERROR_NO_MEMORY != e) {
|
| + nss_SetError(NSS_ERROR_INTERNAL_ERROR);
|
| + }
|
| +
|
| + return rv;
|
| + }
|
|
|
| - return PR_SUCCESS;
|
| + return PR_SUCCESS;
|
| }
|
|
|
| static PRStatus
|
| -arena_remove_pointer
|
| -(
|
| - const NSSArena *arena
|
| -)
|
| +arena_remove_pointer(const NSSArena *arena)
|
| {
|
| - PRStatus rv;
|
| + PRStatus rv;
|
|
|
| - rv = nssPointerTracker_remove(&arena_pointer_tracker, arena);
|
| - if( PR_SUCCESS != rv ) {
|
| - nss_SetError(NSS_ERROR_INTERNAL_ERROR);
|
| - }
|
| + rv = nssPointerTracker_remove(&arena_pointer_tracker, arena);
|
| + if (PR_SUCCESS != rv) {
|
| + nss_SetError(NSS_ERROR_INTERNAL_ERROR);
|
| + }
|
|
|
| - return rv;
|
| + return rv;
|
| }
|
|
|
| /*
|
| @@ -155,45 +149,42 @@ arena_remove_pointer
|
| */
|
|
|
| NSS_IMPLEMENT PRStatus
|
| -nssArena_verifyPointer
|
| -(
|
| - const NSSArena *arena
|
| -)
|
| +nssArena_verifyPointer(const NSSArena *arena)
|
| {
|
| - PRStatus rv;
|
| -
|
| - rv = nssPointerTracker_initialize(&arena_pointer_tracker);
|
| - if( PR_SUCCESS != rv ) {
|
| - /*
|
| - * This is a little disingenious. We have to initialize the
|
| - * tracker, because someone could "legitimately" try to verify
|
| - * an arena pointer before one is ever created. And this step
|
| - * might fail, due to lack of memory. But the only way that
|
| - * this step can fail is if it's doing the call_once stuff,
|
| - * (later calls just no-op). And if it didn't no-op, there
|
| - * aren't any valid arenas.. so the argument certainly isn't one.
|
| - */
|
| - nss_SetError(NSS_ERROR_INVALID_ARENA);
|
| - return PR_FAILURE;
|
| - }
|
| + PRStatus rv;
|
| +
|
| + rv = nssPointerTracker_initialize(&arena_pointer_tracker);
|
| + if (PR_SUCCESS != rv) {
|
| + /*
|
| + * This is a little disingenious. We have to initialize the
|
| + * tracker, because someone could "legitimately" try to verify
|
| + * an arena pointer before one is ever created. And this step
|
| + * might fail, due to lack of memory. But the only way that
|
| + * this step can fail is if it's doing the call_once stuff,
|
| + * (later calls just no-op). And if it didn't no-op, there
|
| + * aren't any valid arenas.. so the argument certainly isn't one.
|
| + */
|
| + nss_SetError(NSS_ERROR_INVALID_ARENA);
|
| + return PR_FAILURE;
|
| + }
|
|
|
| - rv = nssPointerTracker_verify(&arena_pointer_tracker, arena);
|
| - if( PR_SUCCESS != rv ) {
|
| - nss_SetError(NSS_ERROR_INVALID_ARENA);
|
| - return PR_FAILURE;
|
| - }
|
| + rv = nssPointerTracker_verify(&arena_pointer_tracker, arena);
|
| + if (PR_SUCCESS != rv) {
|
| + nss_SetError(NSS_ERROR_INVALID_ARENA);
|
| + return PR_FAILURE;
|
| + }
|
|
|
| - return PR_SUCCESS;
|
| + return PR_SUCCESS;
|
| }
|
| #endif /* DEBUG */
|
|
|
| #ifdef ARENA_DESTRUCTOR_LIST
|
|
|
| struct arena_destructor_node {
|
| - struct arena_destructor_node *next;
|
| - struct arena_destructor_node *prev;
|
| - void (*destructor)(void *argument);
|
| - void *arg;
|
| + struct arena_destructor_node *next;
|
| + struct arena_destructor_node *prev;
|
| + void (*destructor)(void *argument);
|
| + void *arg;
|
| };
|
|
|
| /*
|
| @@ -208,9 +199,9 @@ struct arena_destructor_node {
|
| * arena, but it may not allocate or cause to be allocated any
|
| * memory. This callback facility was included to support our
|
| * debug-version pointer-tracker feature; overuse runs counter to
|
| - * the the original intent of arenas. This routine returns a
|
| - * PRStatus value; if successful, it will return PR_SUCCESS. If
|
| - * unsuccessful, it will set an error on the error stack and
|
| + * the the original intent of arenas. This routine returns a
|
| + * PRStatus value; if successful, it will return PR_SUCCESS. If
|
| + * unsuccessful, it will set an error on the error stack and
|
| * return PR_FAILURE.
|
| *
|
| * The error may be one of the following values:
|
| @@ -223,108 +214,97 @@ struct arena_destructor_node {
|
| */
|
|
|
| NSS_IMPLEMENT PRStatus
|
| -nssArena_registerDestructor
|
| -(
|
| - NSSArena *arena,
|
| - void (*destructor)(void *argument),
|
| - void *arg
|
| -)
|
| +nssArena_registerDestructor(NSSArena *arena, void (*destructor)(void *argument),
|
| + void *arg)
|
| {
|
| - struct arena_destructor_node *it;
|
| + struct arena_destructor_node *it;
|
|
|
| #ifdef NSSDEBUG
|
| - if( PR_SUCCESS != nssArena_verifyPointer(arena) ) {
|
| - return PR_FAILURE;
|
| - }
|
| + if (PR_SUCCESS != nssArena_verifyPointer(arena)) {
|
| + return PR_FAILURE;
|
| + }
|
| #endif /* NSSDEBUG */
|
| -
|
| - it = nss_ZNEW(arena, struct arena_destructor_node);
|
| - if( (struct arena_destructor_node *)NULL == it ) {
|
| - return PR_FAILURE;
|
| - }
|
| -
|
| - it->prev = arena->last_destructor;
|
| - arena->last_destructor->next = it;
|
| - arena->last_destructor = it;
|
| - it->destructor = destructor;
|
| - it->arg = arg;
|
| -
|
| - if( (nssArenaMark *)NULL != arena->last_mark ) {
|
| - arena->last_mark->prev_destructor = it->prev;
|
| - arena->last_mark->next_destructor = it->next;
|
| - }
|
| -
|
| - return PR_SUCCESS;
|
| +
|
| + it = nss_ZNEW(arena, struct arena_destructor_node);
|
| + if ((struct arena_destructor_node *)NULL == it) {
|
| + return PR_FAILURE;
|
| + }
|
| +
|
| + it->prev = arena->last_destructor;
|
| + arena->last_destructor->next = it;
|
| + arena->last_destructor = it;
|
| + it->destructor = destructor;
|
| + it->arg = arg;
|
| +
|
| + if ((nssArenaMark *)NULL != arena->last_mark) {
|
| + arena->last_mark->prev_destructor = it->prev;
|
| + arena->last_mark->next_destructor = it->next;
|
| + }
|
| +
|
| + return PR_SUCCESS;
|
| }
|
|
|
| NSS_IMPLEMENT PRStatus
|
| -nssArena_deregisterDestructor
|
| -(
|
| - NSSArena *arena,
|
| - void (*destructor)(void *argument),
|
| - void *arg
|
| -)
|
| +nssArena_deregisterDestructor(NSSArena *arena,
|
| + void (*destructor)(void *argument), void *arg)
|
| {
|
| - struct arena_destructor_node *it;
|
| + struct arena_destructor_node *it;
|
|
|
| #ifdef NSSDEBUG
|
| - if( PR_SUCCESS != nssArena_verifyPointer(arena) ) {
|
| - return PR_FAILURE;
|
| - }
|
| + if (PR_SUCCESS != nssArena_verifyPointer(arena)) {
|
| + return PR_FAILURE;
|
| + }
|
| #endif /* NSSDEBUG */
|
|
|
| - for( it = arena->first_destructor; it; it = it->next ) {
|
| - if( (it->destructor == destructor) && (it->arg == arg) ) {
|
| - break;
|
| + for (it = arena->first_destructor; it; it = it->next) {
|
| + if ((it->destructor == destructor) && (it->arg == arg)) {
|
| + break;
|
| + }
|
| }
|
| - }
|
| -
|
| - if( (struct arena_destructor_node *)NULL == it ) {
|
| - nss_SetError(NSS_ERROR_NOT_FOUND);
|
| - return PR_FAILURE;
|
| - }
|
| -
|
| - if( it == arena->first_destructor ) {
|
| - arena->first_destructor = it->next;
|
| - }
|
| -
|
| - if( it == arena->last_destructor ) {
|
| - arena->last_destructor = it->prev;
|
| - }
|
| -
|
| - if( (struct arena_destructor_node *)NULL != it->prev ) {
|
| - it->prev->next = it->next;
|
| - }
|
| -
|
| - if( (struct arena_destructor_node *)NULL != it->next ) {
|
| - it->next->prev = it->prev;
|
| - }
|
| -
|
| - {
|
| - nssArenaMark *m;
|
| - for( m = arena->first_mark; m; m = m->next ) {
|
| - if( m->next_destructor == it ) {
|
| - m->next_destructor = it->next;
|
| - }
|
| - if( m->prev_destructor == it ) {
|
| - m->prev_destructor = it->prev;
|
| - }
|
| +
|
| + if ((struct arena_destructor_node *)NULL == it) {
|
| + nss_SetError(NSS_ERROR_NOT_FOUND);
|
| + return PR_FAILURE;
|
| + }
|
| +
|
| + if (it == arena->first_destructor) {
|
| + arena->first_destructor = it->next;
|
| }
|
| - }
|
|
|
| - nss_ZFreeIf(it);
|
| - return PR_SUCCESS;
|
| + if (it == arena->last_destructor) {
|
| + arena->last_destructor = it->prev;
|
| + }
|
| +
|
| + if ((struct arena_destructor_node *)NULL != it->prev) {
|
| + it->prev->next = it->next;
|
| + }
|
| +
|
| + if ((struct arena_destructor_node *)NULL != it->next) {
|
| + it->next->prev = it->prev;
|
| + }
|
| +
|
| + {
|
| + nssArenaMark *m;
|
| + for (m = arena->first_mark; m; m = m->next) {
|
| + if (m->next_destructor == it) {
|
| + m->next_destructor = it->next;
|
| + }
|
| + if (m->prev_destructor == it) {
|
| + m->prev_destructor = it->prev;
|
| + }
|
| + }
|
| + }
|
| +
|
| + nss_ZFreeIf(it);
|
| + return PR_SUCCESS;
|
| }
|
|
|
| static void
|
| -nss_arena_call_destructor_chain
|
| -(
|
| - struct arena_destructor_node *it
|
| -)
|
| +nss_arena_call_destructor_chain(struct arena_destructor_node *it)
|
| {
|
| - for( ; it ; it = it->next ) {
|
| - (*(it->destructor))(it->arg);
|
| - }
|
| + for (; it; it = it->next) {
|
| + (*(it->destructor))(it->arg);
|
| + }
|
| }
|
|
|
| #endif /* ARENA_DESTRUCTOR_LIST */
|
| @@ -344,20 +324,17 @@ nss_arena_call_destructor_chain
|
| */
|
|
|
| NSS_IMPLEMENT NSSArena *
|
| -NSSArena_Create
|
| -(
|
| - void
|
| -)
|
| +NSSArena_Create(void)
|
| {
|
| - nss_ClearErrorStack();
|
| - return nssArena_Create();
|
| + nss_ClearErrorStack();
|
| + return nssArena_Create();
|
| }
|
|
|
| /*
|
| * nssArena_Create
|
| *
|
| * This routine creates a new memory arena. This routine may return
|
| - * NULL upon error, in which case it will have set an error on the
|
| + * NULL upon error, in which case it will have set an error on the
|
| * error stack.
|
| *
|
| * The error may be one of the following values:
|
| @@ -369,66 +346,63 @@ NSSArena_Create
|
| */
|
|
|
| NSS_IMPLEMENT NSSArena *
|
| -nssArena_Create
|
| -(
|
| - void
|
| -)
|
| +nssArena_Create(void)
|
| {
|
| - NSSArena *rv = (NSSArena *)NULL;
|
| -
|
| - rv = nss_ZNEW((NSSArena *)NULL, NSSArena);
|
| - if( (NSSArena *)NULL == rv ) {
|
| - nss_SetError(NSS_ERROR_NO_MEMORY);
|
| - return (NSSArena *)NULL;
|
| - }
|
| -
|
| - rv->lock = PR_NewLock();
|
| - if( (PRLock *)NULL == rv->lock ) {
|
| - (void)nss_ZFreeIf(rv);
|
| - nss_SetError(NSS_ERROR_NO_MEMORY);
|
| - return (NSSArena *)NULL;
|
| - }
|
| -
|
| - /*
|
| - * Arena sizes. The current security code has 229 occurrences of
|
| - * PORT_NewArena. The default chunksizes specified break down as
|
| - *
|
| - * Size Mult. Specified as
|
| - * 512 1 512
|
| - * 1024 7 1024
|
| - * 2048 5 2048
|
| - * 2048 5 CRMF_DEFAULT_ARENA_SIZE
|
| - * 2048 190 DER_DEFAULT_CHUNKSIZE
|
| - * 2048 20 SEC_ASN1_DEFAULT_ARENA_SIZE
|
| - * 4096 1 4096
|
| - *
|
| - * Obviously this "default chunksize" flexibility isn't very
|
| - * useful to us, so I'll just pick 2048.
|
| - */
|
| -
|
| - PL_InitArenaPool(&rv->pool, "NSS", 2048, sizeof(double));
|
| + NSSArena *rv = (NSSArena *)NULL;
|
| +
|
| + rv = nss_ZNEW((NSSArena *)NULL, NSSArena);
|
| + if ((NSSArena *)NULL == rv) {
|
| + nss_SetError(NSS_ERROR_NO_MEMORY);
|
| + return (NSSArena *)NULL;
|
| + }
|
| +
|
| + rv->lock = PR_NewLock();
|
| + if ((PRLock *)NULL == rv->lock) {
|
| + (void)nss_ZFreeIf(rv);
|
| + nss_SetError(NSS_ERROR_NO_MEMORY);
|
| + return (NSSArena *)NULL;
|
| + }
|
| +
|
| + /*
|
| + * Arena sizes. The current security code has 229 occurrences of
|
| + * PORT_NewArena. The default chunksizes specified break down as
|
| + *
|
| + * Size Mult. Specified as
|
| + * 512 1 512
|
| + * 1024 7 1024
|
| + * 2048 5 2048
|
| + * 2048 5 CRMF_DEFAULT_ARENA_SIZE
|
| + * 2048 190 DER_DEFAULT_CHUNKSIZE
|
| + * 2048 20 SEC_ASN1_DEFAULT_ARENA_SIZE
|
| + * 4096 1 4096
|
| + *
|
| + * Obviously this "default chunksize" flexibility isn't very
|
| + * useful to us, so I'll just pick 2048.
|
| + */
|
| +
|
| + PL_InitArenaPool(&rv->pool, "NSS", 2048, sizeof(double));
|
|
|
| #ifdef DEBUG
|
| - {
|
| - PRStatus st;
|
| - st = arena_add_pointer(rv);
|
| - if( PR_SUCCESS != st ) {
|
| - PL_FinishArenaPool(&rv->pool);
|
| - PR_DestroyLock(rv->lock);
|
| - (void)nss_ZFreeIf(rv);
|
| - return (NSSArena *)NULL;
|
| + {
|
| + PRStatus st;
|
| + st = arena_add_pointer(rv);
|
| + if (PR_SUCCESS != st) {
|
| + PL_FinishArenaPool(&rv->pool);
|
| + PR_DestroyLock(rv->lock);
|
| + (void)nss_ZFreeIf(rv);
|
| + return (NSSArena *)NULL;
|
| + }
|
| }
|
| - }
|
| #endif /* DEBUG */
|
|
|
| - return rv;
|
| + return rv;
|
| }
|
|
|
| /*
|
| * NSSArena_Destroy
|
| *
|
| * This routine will destroy the specified arena, freeing all memory
|
| - * allocated from it. This routine returns a PRStatus value; if
|
| + * allocated from it. This routine returns a PRStatus value; if
|
| * successful, it will return PR_SUCCESS. If unsuccessful, it will
|
| * create an error stack and return PR_FAILURE.
|
| *
|
| @@ -441,27 +415,24 @@ nssArena_Create
|
| */
|
|
|
| NSS_IMPLEMENT PRStatus
|
| -NSSArena_Destroy
|
| -(
|
| - NSSArena *arena
|
| -)
|
| +NSSArena_Destroy(NSSArena *arena)
|
| {
|
| - nss_ClearErrorStack();
|
| + nss_ClearErrorStack();
|
|
|
| #ifdef DEBUG
|
| - if( PR_SUCCESS != nssArena_verifyPointer(arena) ) {
|
| - return PR_FAILURE;
|
| - }
|
| + if (PR_SUCCESS != nssArena_verifyPointer(arena)) {
|
| + return PR_FAILURE;
|
| + }
|
| #endif /* DEBUG */
|
|
|
| - return nssArena_Destroy(arena);
|
| + return nssArena_Destroy(arena);
|
| }
|
|
|
| /*
|
| * nssArena_Destroy
|
| *
|
| * This routine will destroy the specified arena, freeing all memory
|
| - * allocated from it. This routine returns a PRStatus value; if
|
| + * allocated from it. This routine returns a PRStatus value; if
|
| * successful, it will return PR_SUCCESS. If unsuccessful, it will
|
| * set an error on the error stack and return PR_FAILURE.
|
| *
|
| @@ -474,45 +445,42 @@ NSSArena_Destroy
|
| */
|
|
|
| NSS_IMPLEMENT PRStatus
|
| -nssArena_Destroy
|
| -(
|
| - NSSArena *arena
|
| -)
|
| +nssArena_Destroy(NSSArena *arena)
|
| {
|
| - PRLock *lock;
|
| + PRLock *lock;
|
|
|
| #ifdef NSSDEBUG
|
| - if( PR_SUCCESS != nssArena_verifyPointer(arena) ) {
|
| - return PR_FAILURE;
|
| - }
|
| + if (PR_SUCCESS != nssArena_verifyPointer(arena)) {
|
| + return PR_FAILURE;
|
| + }
|
| #endif /* NSSDEBUG */
|
|
|
| - if( (PRLock *)NULL == arena->lock ) {
|
| - /* Just got destroyed */
|
| - nss_SetError(NSS_ERROR_INVALID_ARENA);
|
| - return PR_FAILURE;
|
| - }
|
| - PR_Lock(arena->lock);
|
| -
|
| + if ((PRLock *)NULL == arena->lock) {
|
| + /* Just got destroyed */
|
| + nss_SetError(NSS_ERROR_INVALID_ARENA);
|
| + return PR_FAILURE;
|
| + }
|
| + PR_Lock(arena->lock);
|
| +
|
| #ifdef DEBUG
|
| - if( PR_SUCCESS != arena_remove_pointer(arena) ) {
|
| - PR_Unlock(arena->lock);
|
| - return PR_FAILURE;
|
| - }
|
| + if (PR_SUCCESS != arena_remove_pointer(arena)) {
|
| + PR_Unlock(arena->lock);
|
| + return PR_FAILURE;
|
| + }
|
| #endif /* DEBUG */
|
|
|
| #ifdef ARENA_DESTRUCTOR_LIST
|
| - /* Note that the arena is locked at this time */
|
| - nss_arena_call_destructor_chain(arena->first_destructor);
|
| + /* Note that the arena is locked at this time */
|
| + nss_arena_call_destructor_chain(arena->first_destructor);
|
| #endif /* ARENA_DESTRUCTOR_LIST */
|
|
|
| - PL_FinishArenaPool(&arena->pool);
|
| - lock = arena->lock;
|
| - arena->lock = (PRLock *)NULL;
|
| - PR_Unlock(lock);
|
| - PR_DestroyLock(lock);
|
| - (void)nss_ZFreeIf(arena);
|
| - return PR_SUCCESS;
|
| + PL_FinishArenaPool(&arena->pool);
|
| + lock = arena->lock;
|
| + arena->lock = (PRLock *)NULL;
|
| + PR_Unlock(lock);
|
| + PR_DestroyLock(lock);
|
| + (void)nss_ZFreeIf(arena);
|
| + return PR_SUCCESS;
|
| }
|
|
|
| static void *nss_zalloc_arena_locked(NSSArena *arena, PRUint32 size);
|
| @@ -523,9 +491,9 @@ static void *nss_zalloc_arena_locked(NSSArena *arena, PRUint32 size);
|
| * This routine "marks" the current state of an arena. Space
|
| * allocated after the arena has been marked can be freed by
|
| * releasing the arena back to the mark with nssArena_Release,
|
| - * or committed by calling nssArena_Unmark. When successful,
|
| - * this routine returns a valid nssArenaMark pointer. This
|
| - * routine may return NULL upon error, in which case it will
|
| + * or committed by calling nssArena_Unmark. When successful,
|
| + * this routine returns a valid nssArenaMark pointer. This
|
| + * routine may return NULL upon error, in which case it will
|
| * have set an error on the error stack.
|
| *
|
| * The error may be one of the following values:
|
| @@ -539,73 +507,70 @@ static void *nss_zalloc_arena_locked(NSSArena *arena, PRUint32 size);
|
| */
|
|
|
| NSS_IMPLEMENT nssArenaMark *
|
| -nssArena_Mark
|
| -(
|
| - NSSArena *arena
|
| -)
|
| +nssArena_Mark(NSSArena *arena)
|
| {
|
| - nssArenaMark *rv;
|
| - void *p;
|
| + nssArenaMark *rv;
|
| + void *p;
|
|
|
| #ifdef NSSDEBUG
|
| - if( PR_SUCCESS != nssArena_verifyPointer(arena) ) {
|
| - return (nssArenaMark *)NULL;
|
| - }
|
| + if (PR_SUCCESS != nssArena_verifyPointer(arena)) {
|
| + return (nssArenaMark *)NULL;
|
| + }
|
| #endif /* NSSDEBUG */
|
|
|
| - if( (PRLock *)NULL == arena->lock ) {
|
| - /* Just got destroyed */
|
| - nss_SetError(NSS_ERROR_INVALID_ARENA);
|
| - return (nssArenaMark *)NULL;
|
| - }
|
| - PR_Lock(arena->lock);
|
| + if ((PRLock *)NULL == arena->lock) {
|
| + /* Just got destroyed */
|
| + nss_SetError(NSS_ERROR_INVALID_ARENA);
|
| + return (nssArenaMark *)NULL;
|
| + }
|
| + PR_Lock(arena->lock);
|
|
|
| #ifdef ARENA_THREADMARK
|
| - if( (PRThread *)NULL == arena->marking_thread ) {
|
| - /* Unmarked. Store our thread ID */
|
| - arena->marking_thread = PR_GetCurrentThread();
|
| - /* This call never fails. */
|
| - } else {
|
| - /* Marked. Verify it's the current thread */
|
| - if( PR_GetCurrentThread() != arena->marking_thread ) {
|
| - PR_Unlock(arena->lock);
|
| - nss_SetError(NSS_ERROR_ARENA_MARKED_BY_ANOTHER_THREAD);
|
| - return (nssArenaMark *)NULL;
|
| + if ((PRThread *)NULL == arena->marking_thread) {
|
| + /* Unmarked. Store our thread ID */
|
| + arena->marking_thread = PR_GetCurrentThread();
|
| + /* This call never fails. */
|
| + } else {
|
| + /* Marked. Verify it's the current thread */
|
| + if (PR_GetCurrentThread() != arena->marking_thread) {
|
| + PR_Unlock(arena->lock);
|
| + nss_SetError(NSS_ERROR_ARENA_MARKED_BY_ANOTHER_THREAD);
|
| + return (nssArenaMark *)NULL;
|
| + }
|
| }
|
| - }
|
| #endif /* ARENA_THREADMARK */
|
|
|
| - p = PL_ARENA_MARK(&arena->pool);
|
| - /* No error possible */
|
| + p = PL_ARENA_MARK(&arena->pool);
|
| + /* No error possible */
|
|
|
| - /* Do this after the mark */
|
| - rv = (nssArenaMark *)nss_zalloc_arena_locked(arena, sizeof(nssArenaMark));
|
| - if( (nssArenaMark *)NULL == rv ) {
|
| - PR_Unlock(arena->lock);
|
| - nss_SetError(NSS_ERROR_NO_MEMORY);
|
| - return (nssArenaMark *)NULL;
|
| - }
|
| + /* Do this after the mark */
|
| + rv = (nssArenaMark *)nss_zalloc_arena_locked(arena, sizeof(nssArenaMark));
|
| + if ((nssArenaMark *)NULL == rv) {
|
| + PR_Unlock(arena->lock);
|
| + nss_SetError(NSS_ERROR_NO_MEMORY);
|
| + return (nssArenaMark *)NULL;
|
| + }
|
|
|
| #ifdef ARENA_THREADMARK
|
| - if ( (nssArenaMark *)NULL == arena->first_mark) {
|
| - arena->first_mark = rv;
|
| - arena->last_mark = rv;
|
| - } else {
|
| - arena->last_mark->next = rv;
|
| - arena->last_mark = rv;
|
| - }
|
| + if ((nssArenaMark *)NULL == arena->first_mark) {
|
| + arena->first_mark = rv;
|
| + arena->last_mark = rv;
|
| + } else {
|
| + arena->last_mark->next = rv;
|
| + arena->last_mark = rv;
|
| + }
|
| #endif /* ARENA_THREADMARK */
|
|
|
| - rv->mark = p;
|
| - rv->magic = MARK_MAGIC;
|
| + rv->mark = p;
|
| + rv->magic = MARK_MAGIC;
|
|
|
| #ifdef ARENA_DESTRUCTOR_LIST
|
| - rv->prev_destructor = arena->last_destructor;
|
| + rv->prev_destructor = arena->last_destructor;
|
| #endif /* ARENA_DESTRUCTOR_LIST */
|
|
|
| - PR_Unlock(arena->lock);
|
| + PR_Unlock(arena->lock);
|
|
|
| - return rv;
|
| + return rv;
|
| }
|
|
|
| /*
|
| @@ -616,100 +581,98 @@ nssArena_Mark
|
| */
|
|
|
| static PRStatus
|
| -nss_arena_unmark_release
|
| -(
|
| - NSSArena *arena,
|
| - nssArenaMark *arenaMark,
|
| - PRBool release
|
| -)
|
| +nss_arena_unmark_release(NSSArena *arena, nssArenaMark *arenaMark,
|
| + PRBool release)
|
| {
|
| - void *inner_mark;
|
| + void *inner_mark;
|
|
|
| #ifdef NSSDEBUG
|
| - if( PR_SUCCESS != nssArena_verifyPointer(arena) ) {
|
| - return PR_FAILURE;
|
| - }
|
| + if (PR_SUCCESS != nssArena_verifyPointer(arena)) {
|
| + return PR_FAILURE;
|
| + }
|
| #endif /* NSSDEBUG */
|
|
|
| - if( MARK_MAGIC != arenaMark->magic ) {
|
| - nss_SetError(NSS_ERROR_INVALID_ARENA_MARK);
|
| - return PR_FAILURE;
|
| - }
|
| + if (MARK_MAGIC != arenaMark->magic) {
|
| + nss_SetError(NSS_ERROR_INVALID_ARENA_MARK);
|
| + return PR_FAILURE;
|
| + }
|
|
|
| - if( (PRLock *)NULL == arena->lock ) {
|
| - /* Just got destroyed */
|
| - nss_SetError(NSS_ERROR_INVALID_ARENA);
|
| - return PR_FAILURE;
|
| - }
|
| - PR_Lock(arena->lock);
|
| + if ((PRLock *)NULL == arena->lock) {
|
| + /* Just got destroyed */
|
| + nss_SetError(NSS_ERROR_INVALID_ARENA);
|
| + return PR_FAILURE;
|
| + }
|
| + PR_Lock(arena->lock);
|
|
|
| #ifdef ARENA_THREADMARK
|
| - if( (PRThread *)NULL != arena->marking_thread ) {
|
| - if( PR_GetCurrentThread() != arena->marking_thread ) {
|
| - PR_Unlock(arena->lock);
|
| - nss_SetError(NSS_ERROR_ARENA_MARKED_BY_ANOTHER_THREAD);
|
| - return PR_FAILURE;
|
| + if ((PRThread *)NULL != arena->marking_thread) {
|
| + if (PR_GetCurrentThread() != arena->marking_thread) {
|
| + PR_Unlock(arena->lock);
|
| + nss_SetError(NSS_ERROR_ARENA_MARKED_BY_ANOTHER_THREAD);
|
| + return PR_FAILURE;
|
| + }
|
| }
|
| - }
|
| #endif /* ARENA_THREADMARK */
|
|
|
| - if( MARK_MAGIC != arenaMark->magic ) {
|
| - /* Just got released */
|
| - PR_Unlock(arena->lock);
|
| - nss_SetError(NSS_ERROR_INVALID_ARENA_MARK);
|
| - return PR_FAILURE;
|
| - }
|
| -
|
| - arenaMark->magic = 0;
|
| - inner_mark = arenaMark->mark;
|
| -
|
| -#ifdef ARENA_THREADMARK
|
| - {
|
| - nssArenaMark **pMark = &arena->first_mark;
|
| - nssArenaMark *rest;
|
| - nssArenaMark *last = (nssArenaMark *)NULL;
|
| -
|
| - /* Find this mark */
|
| - while( *pMark != arenaMark ) {
|
| - last = *pMark;
|
| - pMark = &(*pMark)->next;
|
| + if (MARK_MAGIC != arenaMark->magic) {
|
| + /* Just got released */
|
| + PR_Unlock(arena->lock);
|
| + nss_SetError(NSS_ERROR_INVALID_ARENA_MARK);
|
| + return PR_FAILURE;
|
| }
|
|
|
| - /* Remember the pointer, then zero it */
|
| - rest = (*pMark)->next;
|
| - *pMark = (nssArenaMark *)NULL;
|
| + arenaMark->magic = 0;
|
| + inner_mark = arenaMark->mark;
|
|
|
| - arena->last_mark = last;
|
| -
|
| - /* Invalidate any later marks being implicitly released */
|
| - for( ; (nssArenaMark *)NULL != rest; rest = rest->next ) {
|
| - rest->magic = 0;
|
| - }
|
| -
|
| - /* If we just got rid of the first mark, clear the thread ID */
|
| - if( (nssArenaMark *)NULL == arena->first_mark ) {
|
| - arena->marking_thread = (PRThread *)NULL;
|
| +#ifdef ARENA_THREADMARK
|
| + {
|
| + nssArenaMark **pMark = &arena->first_mark;
|
| + nssArenaMark *rest;
|
| + nssArenaMark *last = (nssArenaMark *)NULL;
|
| +
|
| + /* Find this mark */
|
| + while (*pMark != arenaMark) {
|
| + last = *pMark;
|
| + pMark = &(*pMark)->next;
|
| + }
|
| +
|
| + /* Remember the pointer, then zero it */
|
| + rest = (*pMark)->next;
|
| + *pMark = (nssArenaMark *)NULL;
|
| +
|
| + arena->last_mark = last;
|
| +
|
| + /* Invalidate any later marks being implicitly released */
|
| + for (; (nssArenaMark *)NULL != rest; rest = rest->next) {
|
| + rest->magic = 0;
|
| + }
|
| +
|
| + /* If we just got rid of the first mark, clear the thread ID */
|
| + if ((nssArenaMark *)NULL == arena->first_mark) {
|
| + arena->marking_thread = (PRThread *)NULL;
|
| + }
|
| }
|
| - }
|
| #endif /* ARENA_THREADMARK */
|
|
|
| - if( release ) {
|
| + if (release) {
|
| #ifdef ARENA_DESTRUCTOR_LIST
|
| - if( (struct arena_destructor_node *)NULL != arenaMark->prev_destructor ) {
|
| - arenaMark->prev_destructor->next = (struct arena_destructor_node *)NULL;
|
| - }
|
| - arena->last_destructor = arenaMark->prev_destructor;
|
| -
|
| - /* Note that the arena is locked at this time */
|
| - nss_arena_call_destructor_chain(arenaMark->next_destructor);
|
| + if ((struct arena_destructor_node *)NULL !=
|
| + arenaMark->prev_destructor) {
|
| + arenaMark->prev_destructor->next =
|
| + (struct arena_destructor_node *)NULL;
|
| + }
|
| + arena->last_destructor = arenaMark->prev_destructor;
|
| +
|
| + /* Note that the arena is locked at this time */
|
| + nss_arena_call_destructor_chain(arenaMark->next_destructor);
|
| #endif /* ARENA_DESTRUCTOR_LIST */
|
|
|
| - PL_ARENA_RELEASE(&arena->pool, inner_mark);
|
| - /* No error return */
|
| - }
|
| + PL_ARENA_RELEASE(&arena->pool, inner_mark);
|
| + /* No error return */
|
| + }
|
|
|
| - PR_Unlock(arena->lock);
|
| - return PR_SUCCESS;
|
| + PR_Unlock(arena->lock);
|
| + return PR_SUCCESS;
|
| }
|
|
|
| /*
|
| @@ -732,13 +695,9 @@ nss_arena_unmark_release
|
| */
|
|
|
| NSS_IMPLEMENT PRStatus
|
| -nssArena_Release
|
| -(
|
| - NSSArena *arena,
|
| - nssArenaMark *arenaMark
|
| -)
|
| +nssArena_Release(NSSArena *arena, nssArenaMark *arenaMark)
|
| {
|
| - return nss_arena_unmark_release(arena, arenaMark, PR_TRUE);
|
| + return nss_arena_unmark_release(arena, arenaMark, PR_TRUE);
|
| }
|
|
|
| /*
|
| @@ -764,13 +723,9 @@ nssArena_Release
|
| */
|
|
|
| NSS_IMPLEMENT PRStatus
|
| -nssArena_Unmark
|
| -(
|
| - NSSArena *arena,
|
| - nssArenaMark *arenaMark
|
| -)
|
| +nssArena_Unmark(NSSArena *arena, nssArenaMark *arenaMark)
|
| {
|
| - return nss_arena_unmark_release(arena, arenaMark, PR_FALSE);
|
| + return nss_arena_unmark_release(arena, arenaMark, PR_FALSE);
|
| }
|
|
|
| /*
|
| @@ -782,49 +737,45 @@ nssArena_Unmark
|
| * maybe we should add a magic value?
|
| */
|
| struct pointer_header {
|
| - NSSArena *arena;
|
| - PRUint32 size;
|
| + NSSArena *arena;
|
| + PRUint32 size;
|
| };
|
|
|
| static void *
|
| -nss_zalloc_arena_locked
|
| -(
|
| - NSSArena *arena,
|
| - PRUint32 size
|
| -)
|
| +nss_zalloc_arena_locked(NSSArena *arena, PRUint32 size)
|
| {
|
| - void *p;
|
| - void *rv;
|
| - struct pointer_header *h;
|
| - PRUint32 my_size = size + sizeof(struct pointer_header);
|
| - PL_ARENA_ALLOCATE(p, &arena->pool, my_size);
|
| - if( (void *)NULL == p ) {
|
| - nss_SetError(NSS_ERROR_NO_MEMORY);
|
| - return (void *)NULL;
|
| - }
|
| - /*
|
| - * Do this before we unlock. This way if the user is using
|
| - * an arena in one thread while destroying it in another, he'll
|
| - * fault/FMR in his code, not ours.
|
| - */
|
| - h = (struct pointer_header *)p;
|
| - h->arena = arena;
|
| - h->size = size;
|
| - rv = (void *)((char *)h + sizeof(struct pointer_header));
|
| - (void)nsslibc_memset(rv, 0, size);
|
| - return rv;
|
| + void *p;
|
| + void *rv;
|
| + struct pointer_header *h;
|
| + PRUint32 my_size = size + sizeof(struct pointer_header);
|
| + PL_ARENA_ALLOCATE(p, &arena->pool, my_size);
|
| + if ((void *)NULL == p) {
|
| + nss_SetError(NSS_ERROR_NO_MEMORY);
|
| + return (void *)NULL;
|
| + }
|
| + /*
|
| + * Do this before we unlock. This way if the user is using
|
| + * an arena in one thread while destroying it in another, he'll
|
| + * fault/FMR in his code, not ours.
|
| + */
|
| + h = (struct pointer_header *)p;
|
| + h->arena = arena;
|
| + h->size = size;
|
| + rv = (void *)((char *)h + sizeof(struct pointer_header));
|
| + (void)nsslibc_memset(rv, 0, size);
|
| + return rv;
|
| }
|
|
|
| /*
|
| * NSS_ZAlloc
|
| *
|
| - * This routine allocates and zeroes a section of memory of the
|
| + * This routine allocates and zeroes a section of memory of the
|
| * size, and returns to the caller a pointer to that memory. If
|
| * the optional arena argument is non-null, the memory will be
|
| * obtained from that arena; otherwise, the memory will be obtained
|
| * from the heap. This routine may return NULL upon error, in
|
| * which case it will have set an error upon the error stack. The
|
| - * value specified for size may be zero; in which case a valid
|
| + * value specified for size may be zero; in which case a valid
|
| * zero-length block of memory will be allocated. This block may
|
| * be expanded by calling NSS_ZRealloc.
|
| *
|
| @@ -839,25 +790,21 @@ nss_zalloc_arena_locked
|
| */
|
|
|
| NSS_IMPLEMENT void *
|
| -NSS_ZAlloc
|
| -(
|
| - NSSArena *arenaOpt,
|
| - PRUint32 size
|
| -)
|
| +NSS_ZAlloc(NSSArena *arenaOpt, PRUint32 size)
|
| {
|
| - return nss_ZAlloc(arenaOpt, size);
|
| + return nss_ZAlloc(arenaOpt, size);
|
| }
|
|
|
| /*
|
| * nss_ZAlloc
|
| *
|
| - * This routine allocates and zeroes a section of memory of the
|
| + * This routine allocates and zeroes a section of memory of the
|
| * size, and returns to the caller a pointer to that memory. If
|
| * the optional arena argument is non-null, the memory will be
|
| * obtained from that arena; otherwise, the memory will be obtained
|
| * from the heap. This routine may return NULL upon error, in
|
| * which case it will have set an error upon the error stack. The
|
| - * value specified for size may be zero; in which case a valid
|
| + * value specified for size may be zero; in which case a valid
|
| * zero-length block of memory will be allocated. This block may
|
| * be expanded by calling nss_ZRealloc.
|
| *
|
| @@ -872,76 +819,72 @@ NSS_ZAlloc
|
| */
|
|
|
| NSS_IMPLEMENT void *
|
| -nss_ZAlloc
|
| -(
|
| - NSSArena *arenaOpt,
|
| - PRUint32 size
|
| -)
|
| +nss_ZAlloc(NSSArena *arenaOpt, PRUint32 size)
|
| {
|
| - struct pointer_header *h;
|
| - PRUint32 my_size = size + sizeof(struct pointer_header);
|
| -
|
| - if( my_size < sizeof(struct pointer_header) ) {
|
| - /* Wrapped */
|
| - nss_SetError(NSS_ERROR_NO_MEMORY);
|
| - return (void *)NULL;
|
| - }
|
| -
|
| - if( (NSSArena *)NULL == arenaOpt ) {
|
| - /* Heap allocation, no locking required. */
|
| - h = (struct pointer_header *)PR_Calloc(1, my_size);
|
| - if( (struct pointer_header *)NULL == h ) {
|
| - nss_SetError(NSS_ERROR_NO_MEMORY);
|
| - return (void *)NULL;
|
| + struct pointer_header *h;
|
| + PRUint32 my_size = size + sizeof(struct pointer_header);
|
| +
|
| + if (my_size < sizeof(struct pointer_header)) {
|
| + /* Wrapped */
|
| + nss_SetError(NSS_ERROR_NO_MEMORY);
|
| + return (void *)NULL;
|
| }
|
|
|
| - h->arena = (NSSArena *)NULL;
|
| - h->size = size;
|
| - /* We used calloc: it's already zeroed */
|
| + if ((NSSArena *)NULL == arenaOpt) {
|
| + /* Heap allocation, no locking required. */
|
| + h = (struct pointer_header *)PR_Calloc(1, my_size);
|
| + if ((struct pointer_header *)NULL == h) {
|
| + nss_SetError(NSS_ERROR_NO_MEMORY);
|
| + return (void *)NULL;
|
| + }
|
|
|
| - return (void *)((char *)h + sizeof(struct pointer_header));
|
| - } else {
|
| - void *rv;
|
| - /* Arena allocation */
|
| + h->arena = (NSSArena *)NULL;
|
| + h->size = size;
|
| + /* We used calloc: it's already zeroed */
|
| +
|
| + return (void *)((char *)h + sizeof(struct pointer_header));
|
| + } else {
|
| + void *rv;
|
| +/* Arena allocation */
|
| #ifdef NSSDEBUG
|
| - if( PR_SUCCESS != nssArena_verifyPointer(arenaOpt) ) {
|
| - return (void *)NULL;
|
| - }
|
| + if (PR_SUCCESS != nssArena_verifyPointer(arenaOpt)) {
|
| + return (void *)NULL;
|
| + }
|
| #endif /* NSSDEBUG */
|
|
|
| - if( (PRLock *)NULL == arenaOpt->lock ) {
|
| - /* Just got destroyed */
|
| - nss_SetError(NSS_ERROR_INVALID_ARENA);
|
| - return (void *)NULL;
|
| - }
|
| - PR_Lock(arenaOpt->lock);
|
| + if ((PRLock *)NULL == arenaOpt->lock) {
|
| + /* Just got destroyed */
|
| + nss_SetError(NSS_ERROR_INVALID_ARENA);
|
| + return (void *)NULL;
|
| + }
|
| + PR_Lock(arenaOpt->lock);
|
|
|
| #ifdef ARENA_THREADMARK
|
| - if( (PRThread *)NULL != arenaOpt->marking_thread ) {
|
| - if( PR_GetCurrentThread() != arenaOpt->marking_thread ) {
|
| - nss_SetError(NSS_ERROR_ARENA_MARKED_BY_ANOTHER_THREAD);
|
| - PR_Unlock(arenaOpt->lock);
|
| - return (void *)NULL;
|
| - }
|
| - }
|
| + if ((PRThread *)NULL != arenaOpt->marking_thread) {
|
| + if (PR_GetCurrentThread() != arenaOpt->marking_thread) {
|
| + nss_SetError(NSS_ERROR_ARENA_MARKED_BY_ANOTHER_THREAD);
|
| + PR_Unlock(arenaOpt->lock);
|
| + return (void *)NULL;
|
| + }
|
| + }
|
| #endif /* ARENA_THREADMARK */
|
|
|
| - rv = nss_zalloc_arena_locked(arenaOpt, size);
|
| + rv = nss_zalloc_arena_locked(arenaOpt, size);
|
|
|
| - PR_Unlock(arenaOpt->lock);
|
| - return rv;
|
| - }
|
| - /*NOTREACHED*/
|
| + PR_Unlock(arenaOpt->lock);
|
| + return rv;
|
| + }
|
| + /*NOTREACHED*/
|
| }
|
|
|
| /*
|
| * NSS_ZFreeIf
|
| *
|
| - * If the specified pointer is non-null, then the region of memory
|
| - * to which it points -- which must have been allocated with
|
| - * NSS_ZAlloc -- will be zeroed and released. This routine
|
| + * If the specified pointer is non-null, then the region of memory
|
| + * to which it points -- which must have been allocated with
|
| + * NSS_ZAlloc -- will be zeroed and released. This routine
|
| * returns a PRStatus value; if successful, it will return PR_SUCCESS.
|
| - * If unsuccessful, it will set an error on the error stack and return
|
| + * If unsuccessful, it will set an error on the error stack and return
|
| * PR_FAILURE.
|
| *
|
| * The error may be one of the following values:
|
| @@ -952,22 +895,19 @@ nss_ZAlloc
|
| * PR_FAILURE
|
| */
|
| NSS_IMPLEMENT PRStatus
|
| -NSS_ZFreeIf
|
| -(
|
| - void *pointer
|
| -)
|
| +NSS_ZFreeIf(void *pointer)
|
| {
|
| - return nss_ZFreeIf(pointer);
|
| + return nss_ZFreeIf(pointer);
|
| }
|
|
|
| /*
|
| * nss_ZFreeIf
|
| *
|
| - * If the specified pointer is non-null, then the region of memory
|
| - * to which it points -- which must have been allocated with
|
| - * nss_ZAlloc -- will be zeroed and released. This routine
|
| + * If the specified pointer is non-null, then the region of memory
|
| + * to which it points -- which must have been allocated with
|
| + * nss_ZAlloc -- will be zeroed and released. This routine
|
| * returns a PRStatus value; if successful, it will return PR_SUCCESS.
|
| - * If unsuccessful, it will set an error on the error stack and return
|
| + * If unsuccessful, it will set an error on the error stack and return
|
| * PR_FAILURE.
|
| *
|
| * The error may be one of the following values:
|
| @@ -979,60 +919,57 @@ NSS_ZFreeIf
|
| */
|
|
|
| NSS_IMPLEMENT PRStatus
|
| -nss_ZFreeIf
|
| -(
|
| - void *pointer
|
| -)
|
| +nss_ZFreeIf(void *pointer)
|
| {
|
| - struct pointer_header *h;
|
| + struct pointer_header *h;
|
|
|
| - if( (void *)NULL == pointer ) {
|
| - return PR_SUCCESS;
|
| - }
|
| + if ((void *)NULL == pointer) {
|
| + return PR_SUCCESS;
|
| + }
|
|
|
| - h = (struct pointer_header *)((char *)pointer
|
| - - sizeof(struct pointer_header));
|
| + h = (struct pointer_header *)((char *)pointer -
|
| + sizeof(struct pointer_header));
|
|
|
| - /* Check any magic here */
|
| + /* Check any magic here */
|
|
|
| - if( (NSSArena *)NULL == h->arena ) {
|
| - /* Heap */
|
| - (void)nsslibc_memset(pointer, 0, h->size);
|
| - PR_Free(h);
|
| - return PR_SUCCESS;
|
| - } else {
|
| - /* Arena */
|
| + if ((NSSArena *)NULL == h->arena) {
|
| + /* Heap */
|
| + (void)nsslibc_memset(pointer, 0, h->size);
|
| + PR_Free(h);
|
| + return PR_SUCCESS;
|
| + } else {
|
| +/* Arena */
|
| #ifdef NSSDEBUG
|
| - if( PR_SUCCESS != nssArena_verifyPointer(h->arena) ) {
|
| - return PR_FAILURE;
|
| - }
|
| + if (PR_SUCCESS != nssArena_verifyPointer(h->arena)) {
|
| + return PR_FAILURE;
|
| + }
|
| #endif /* NSSDEBUG */
|
|
|
| - if( (PRLock *)NULL == h->arena->lock ) {
|
| - /* Just got destroyed.. so this pointer is invalid */
|
| - nss_SetError(NSS_ERROR_INVALID_POINTER);
|
| - return PR_FAILURE;
|
| - }
|
| - PR_Lock(h->arena->lock);
|
| + if ((PRLock *)NULL == h->arena->lock) {
|
| + /* Just got destroyed.. so this pointer is invalid */
|
| + nss_SetError(NSS_ERROR_INVALID_POINTER);
|
| + return PR_FAILURE;
|
| + }
|
| + PR_Lock(h->arena->lock);
|
|
|
| - (void)nsslibc_memset(pointer, 0, h->size);
|
| + (void)nsslibc_memset(pointer, 0, h->size);
|
|
|
| - /* No way to "free" it within an NSPR arena. */
|
| + /* No way to "free" it within an NSPR arena. */
|
|
|
| - PR_Unlock(h->arena->lock);
|
| - return PR_SUCCESS;
|
| - }
|
| - /*NOTREACHED*/
|
| + PR_Unlock(h->arena->lock);
|
| + return PR_SUCCESS;
|
| + }
|
| + /*NOTREACHED*/
|
| }
|
|
|
| /*
|
| * NSS_ZRealloc
|
| *
|
| * This routine reallocates a block of memory obtained by calling
|
| - * nss_ZAlloc or nss_ZRealloc. The portion of memory
|
| + * nss_ZAlloc or nss_ZRealloc. The portion of memory
|
| * between the new and old sizes -- which is either being newly
|
| - * obtained or released -- is in either case zeroed. This routine
|
| - * may return NULL upon failure, in which case it will have placed
|
| + * obtained or released -- is in either case zeroed. This routine
|
| + * may return NULL upon failure, in which case it will have placed
|
| * an error on the error stack.
|
| *
|
| * The error may be one of the following values:
|
| @@ -1046,11 +983,7 @@ nss_ZFreeIf
|
| */
|
|
|
| NSS_EXTERN void *
|
| -NSS_ZRealloc
|
| -(
|
| - void *pointer,
|
| - PRUint32 newSize
|
| -)
|
| +NSS_ZRealloc(void *pointer, PRUint32 newSize)
|
| {
|
| return nss_ZRealloc(pointer, newSize);
|
| }
|
| @@ -1059,10 +992,10 @@ NSS_ZRealloc
|
| * nss_ZRealloc
|
| *
|
| * This routine reallocates a block of memory obtained by calling
|
| - * nss_ZAlloc or nss_ZRealloc. The portion of memory
|
| + * nss_ZAlloc or nss_ZRealloc. The portion of memory
|
| * between the new and old sizes -- which is either being newly
|
| - * obtained or released -- is in either case zeroed. This routine
|
| - * may return NULL upon failure, in which case it will have placed
|
| + * obtained or released -- is in either case zeroed. This routine
|
| + * may return NULL upon failure, in which case it will have placed
|
| * an error on the error stack.
|
| *
|
| * The error may be one of the following values:
|
| @@ -1076,139 +1009,135 @@ NSS_ZRealloc
|
| */
|
|
|
| NSS_EXTERN void *
|
| -nss_ZRealloc
|
| -(
|
| - void *pointer,
|
| - PRUint32 newSize
|
| -)
|
| +nss_ZRealloc(void *pointer, PRUint32 newSize)
|
| {
|
| - NSSArena *arena;
|
| - struct pointer_header *h, *new_h;
|
| - PRUint32 my_newSize = newSize + sizeof(struct pointer_header);
|
| - void *rv;
|
| -
|
| - if( my_newSize < sizeof(struct pointer_header) ) {
|
| - /* Wrapped */
|
| - nss_SetError(NSS_ERROR_NO_MEMORY);
|
| - return (void *)NULL;
|
| - }
|
| -
|
| - if( (void *)NULL == pointer ) {
|
| - nss_SetError(NSS_ERROR_INVALID_POINTER);
|
| - return (void *)NULL;
|
| - }
|
| -
|
| - h = (struct pointer_header *)((char *)pointer
|
| - - sizeof(struct pointer_header));
|
| -
|
| - /* Check any magic here */
|
| -
|
| - if( newSize == h->size ) {
|
| - /* saves thrashing */
|
| - return pointer;
|
| - }
|
| -
|
| - arena = h->arena;
|
| - if (!arena) {
|
| - /* Heap */
|
| - new_h = (struct pointer_header *)PR_Calloc(1, my_newSize);
|
| - if( (struct pointer_header *)NULL == new_h ) {
|
| - nss_SetError(NSS_ERROR_NO_MEMORY);
|
| - return (void *)NULL;
|
| - }
|
| + NSSArena *arena;
|
| + struct pointer_header *h, *new_h;
|
| + PRUint32 my_newSize = newSize + sizeof(struct pointer_header);
|
| + void *rv;
|
|
|
| - new_h->arena = (NSSArena *)NULL;
|
| - new_h->size = newSize;
|
| - rv = (void *)((char *)new_h + sizeof(struct pointer_header));
|
| + if (my_newSize < sizeof(struct pointer_header)) {
|
| + /* Wrapped */
|
| + nss_SetError(NSS_ERROR_NO_MEMORY);
|
| + return (void *)NULL;
|
| + }
|
|
|
| - if( newSize > h->size ) {
|
| - (void)nsslibc_memcpy(rv, pointer, h->size);
|
| - (void)nsslibc_memset(&((char *)rv)[ h->size ],
|
| - 0, (newSize - h->size));
|
| - } else {
|
| - (void)nsslibc_memcpy(rv, pointer, newSize);
|
| + if ((void *)NULL == pointer) {
|
| + nss_SetError(NSS_ERROR_INVALID_POINTER);
|
| + return (void *)NULL;
|
| }
|
|
|
| - (void)nsslibc_memset(pointer, 0, h->size);
|
| - h->size = 0;
|
| - PR_Free(h);
|
| + h = (struct pointer_header *)((char *)pointer -
|
| + sizeof(struct pointer_header));
|
|
|
| - return rv;
|
| - } else {
|
| - void *p;
|
| - /* Arena */
|
| -#ifdef NSSDEBUG
|
| - if (PR_SUCCESS != nssArena_verifyPointer(arena)) {
|
| - return (void *)NULL;
|
| + /* Check any magic here */
|
| +
|
| + if (newSize == h->size) {
|
| + /* saves thrashing */
|
| + return pointer;
|
| }
|
| +
|
| + arena = h->arena;
|
| + if (!arena) {
|
| + /* Heap */
|
| + new_h = (struct pointer_header *)PR_Calloc(1, my_newSize);
|
| + if ((struct pointer_header *)NULL == new_h) {
|
| + nss_SetError(NSS_ERROR_NO_MEMORY);
|
| + return (void *)NULL;
|
| + }
|
| +
|
| + new_h->arena = (NSSArena *)NULL;
|
| + new_h->size = newSize;
|
| + rv = (void *)((char *)new_h + sizeof(struct pointer_header));
|
| +
|
| + if (newSize > h->size) {
|
| + (void)nsslibc_memcpy(rv, pointer, h->size);
|
| + (void)nsslibc_memset(&((char *)rv)[h->size], 0,
|
| + (newSize - h->size));
|
| + } else {
|
| + (void)nsslibc_memcpy(rv, pointer, newSize);
|
| + }
|
| +
|
| + (void)nsslibc_memset(pointer, 0, h->size);
|
| + h->size = 0;
|
| + PR_Free(h);
|
| +
|
| + return rv;
|
| + } else {
|
| + void *p;
|
| +/* Arena */
|
| +#ifdef NSSDEBUG
|
| + if (PR_SUCCESS != nssArena_verifyPointer(arena)) {
|
| + return (void *)NULL;
|
| + }
|
| #endif /* NSSDEBUG */
|
|
|
| - if (!arena->lock) {
|
| - /* Just got destroyed.. so this pointer is invalid */
|
| - nss_SetError(NSS_ERROR_INVALID_POINTER);
|
| - return (void *)NULL;
|
| - }
|
| - PR_Lock(arena->lock);
|
| + if (!arena->lock) {
|
| + /* Just got destroyed.. so this pointer is invalid */
|
| + nss_SetError(NSS_ERROR_INVALID_POINTER);
|
| + return (void *)NULL;
|
| + }
|
| + PR_Lock(arena->lock);
|
|
|
| #ifdef ARENA_THREADMARK
|
| - if (arena->marking_thread) {
|
| - if (PR_GetCurrentThread() != arena->marking_thread) {
|
| - PR_Unlock(arena->lock);
|
| - nss_SetError(NSS_ERROR_ARENA_MARKED_BY_ANOTHER_THREAD);
|
| - return (void *)NULL;
|
| - }
|
| - }
|
| + if (arena->marking_thread) {
|
| + if (PR_GetCurrentThread() != arena->marking_thread) {
|
| + PR_Unlock(arena->lock);
|
| + nss_SetError(NSS_ERROR_ARENA_MARKED_BY_ANOTHER_THREAD);
|
| + return (void *)NULL;
|
| + }
|
| + }
|
| #endif /* ARENA_THREADMARK */
|
|
|
| - if( newSize < h->size ) {
|
| - /*
|
| - * We have no general way of returning memory to the arena
|
| - * (mark/release doesn't work because things may have been
|
| - * allocated after this object), so the memory is gone
|
| - * anyway. We might as well just return the same pointer to
|
| - * the user, saying "yeah, uh-hunh, you can only use less of
|
| - * it now." We'll zero the leftover part, of course. And
|
| - * in fact we might as well *not* adjust h->size-- this way,
|
| - * if the user reallocs back up to something not greater than
|
| - * the original size, then voila, there's the memory! This
|
| - * way a thrash big/small/big/small doesn't burn up the arena.
|
| - */
|
| - char *extra = &((char *)pointer)[ newSize ];
|
| - (void)nsslibc_memset(extra, 0, (h->size - newSize));
|
| - PR_Unlock(arena->lock);
|
| - return pointer;
|
| - }
|
| -
|
| - PL_ARENA_ALLOCATE(p, &arena->pool, my_newSize);
|
| - if( (void *)NULL == p ) {
|
| - PR_Unlock(arena->lock);
|
| - nss_SetError(NSS_ERROR_NO_MEMORY);
|
| - return (void *)NULL;
|
| - }
|
| -
|
| - new_h = (struct pointer_header *)p;
|
| - new_h->arena = arena;
|
| - new_h->size = newSize;
|
| - rv = (void *)((char *)new_h + sizeof(struct pointer_header));
|
| - if (rv != pointer) {
|
| - (void)nsslibc_memcpy(rv, pointer, h->size);
|
| - (void)nsslibc_memset(pointer, 0, h->size);
|
| + if (newSize < h->size) {
|
| + /*
|
| + * We have no general way of returning memory to the arena
|
| + * (mark/release doesn't work because things may have been
|
| + * allocated after this object), so the memory is gone
|
| + * anyway. We might as well just return the same pointer to
|
| + * the user, saying "yeah, uh-hunh, you can only use less of
|
| + * it now." We'll zero the leftover part, of course. And
|
| + * in fact we might as well *not* adjust h->size-- this way,
|
| + * if the user reallocs back up to something not greater than
|
| + * the original size, then voila, there's the memory! This
|
| + * way a thrash big/small/big/small doesn't burn up the arena.
|
| + */
|
| + char *extra = &((char *)pointer)[newSize];
|
| + (void)nsslibc_memset(extra, 0, (h->size - newSize));
|
| + PR_Unlock(arena->lock);
|
| + return pointer;
|
| + }
|
| +
|
| + PL_ARENA_ALLOCATE(p, &arena->pool, my_newSize);
|
| + if ((void *)NULL == p) {
|
| + PR_Unlock(arena->lock);
|
| + nss_SetError(NSS_ERROR_NO_MEMORY);
|
| + return (void *)NULL;
|
| + }
|
| +
|
| + new_h = (struct pointer_header *)p;
|
| + new_h->arena = arena;
|
| + new_h->size = newSize;
|
| + rv = (void *)((char *)new_h + sizeof(struct pointer_header));
|
| + if (rv != pointer) {
|
| + (void)nsslibc_memcpy(rv, pointer, h->size);
|
| + (void)nsslibc_memset(pointer, 0, h->size);
|
| + }
|
| + (void)nsslibc_memset(&((char *)rv)[h->size], 0, (newSize - h->size));
|
| + h->arena = (NSSArena *)NULL;
|
| + h->size = 0;
|
| + PR_Unlock(arena->lock);
|
| + return rv;
|
| }
|
| - (void)nsslibc_memset(&((char *)rv)[ h->size ], 0, (newSize - h->size));
|
| - h->arena = (NSSArena *)NULL;
|
| - h->size = 0;
|
| - PR_Unlock(arena->lock);
|
| - return rv;
|
| - }
|
| - /*NOTREACHED*/
|
| + /*NOTREACHED*/
|
| }
|
|
|
| -PRStatus
|
| +PRStatus
|
| nssArena_Shutdown(void)
|
| {
|
| - PRStatus rv = PR_SUCCESS;
|
| + PRStatus rv = PR_SUCCESS;
|
| #ifdef DEBUG
|
| - rv = nssPointerTracker_finalize(&arena_pointer_tracker);
|
| + rv = nssPointerTracker_finalize(&arena_pointer_tracker);
|
| #endif
|
| - return rv;
|
| + return rv;
|
| }
|
|
|
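For context, the allocator API that this reformatting patch touches is typically used along the lines of the sketch below. The function names and signatures (NSSArena_Create, NSS_ZAlloc, NSS_ZRealloc, NSS_ZFreeIf, NSSArena_Destroy) are the ones visible in the diff above; the header name, the caller, and the error-handling flow are illustrative assumptions, not part of the patch.

    /*
     * Illustrative sketch only -- not part of the patch above.  The arena
     * and NSS_Z* calls are those defined in arena.c; the include and the
     * surrounding function are assumptions for the example.
     */
    #include "nssbase.h" /* assumed header declaring NSSArena_Create & friends */

    static PRStatus
    build_record(void)
    {
        NSSArena *arena = NSSArena_Create();
        if ((NSSArena *)NULL == arena) {
            return PR_FAILURE; /* error is already on the NSS error stack */
        }

        /* Allocations come back zeroed and live as long as the arena does. */
        char *buf = (char *)NSS_ZAlloc(arena, 64);
        if ((char *)NULL == buf) {
            (void)NSSArena_Destroy(arena);
            return PR_FAILURE;
        }

        /* Growing preserves the old contents and zeroes the added tail. */
        buf = (char *)NSS_ZRealloc(buf, 128);
        if ((char *)NULL == buf) {
            (void)NSSArena_Destroy(arena);
            return PR_FAILURE;
        }

        /* ... use buf ... */

        /* Freeing arena-backed memory only zeroes it; the space is reclaimed
         * when the whole arena is destroyed. */
        (void)NSS_ZFreeIf(buf);
        return NSSArena_Destroy(arena);
    }

Within NSS itself, the lowercase internal API shown in the diff (nssArena_Mark, nssArena_Unmark, nssArena_Release) additionally allows a caller to mark the arena, allocate, and then either commit or roll back to the mark; the sketch above sticks to the public uppercase entry points.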