Chromium Code Reviews

Side by Side Diff: nspr/lib/ds/plarena.c

Issue 2078763002: Delete bundled copy of NSS and replace with README. (Closed) Base URL: https://chromium.googlesource.com/chromium/deps/nss@master
Patch Set: Delete bundled copy of NSS and replace with README. Created 4 years, 6 months ago
Old file contents (deleted by this patch; the new side of the diff is empty):
1 /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* This Source Code Form is subject to the terms of the Mozilla Public
3 * License, v. 2.0. If a copy of the MPL was not distributed with this
4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
5
6 /*
7 * Lifetime-based fast allocation, inspired by much prior art, including
8 * "Fast Allocation and Deallocation of Memory Based on Object Lifetimes"
9 * David R. Hanson, Software -- Practice and Experience, Vol. 20(1).
10 */
11 #include <stdlib.h>
12 #include <string.h>
13 #include "plarena.h"
14 #include "prmem.h"
15 #include "prbit.h"
16 #include "prlog.h"
17 #include "prlock.h"
18 #include "prinit.h"
19
20 static PLArena *arena_freelist;
21
22 #ifdef PL_ARENAMETER
23 static PLArenaStats *arena_stats_list;
24
25 #define COUNT(pool,what) (pool)->stats.what++
26 #else
27 #define COUNT(pool,what) /* nothing */
28 #endif
29
30 #define PL_ARENA_DEFAULT_ALIGN sizeof(double)
31
32 static PRLock *arenaLock;
33 static PRCallOnceType once;
34 static const PRCallOnceType pristineCallOnce;
35
36 /*
37 ** InitializeArenas() -- Initialize arena operations.
38 **
39 ** InitializeArenas() is called exactly once, from LockArena().
40 ** This function creates the arena protection lock:
41 ** arenaLock.
42 **
43 ** Note: If the arenaLock cannot be created, InitializeArenas()
44 ** fails quietly, returning PR_FAILURE. This percolates up to
45 ** the application using the Arena API: the caller gets no
46 ** arena from PL_ArenaAllocate() and must either fail
47 ** gracefully or recover on its own.
48 **
49 */
50 static PRStatus InitializeArenas( void )
51 {
52 PR_ASSERT( arenaLock == NULL );
53 arenaLock = PR_NewLock();
54 if ( arenaLock == NULL )
55 return PR_FAILURE;
56 else
57 return PR_SUCCESS;
58 } /* end InitializeArenas() */
59
60 static PRStatus LockArena( void )
61 {
62 PRStatus rc = PR_CallOnce( &once, InitializeArenas );
63
64 if ( PR_FAILURE != rc )
65 PR_Lock( arenaLock );
66 return(rc);
67 } /* end LockArena() */
68
69 static void UnlockArena( void )
70 {
71 PR_Unlock( arenaLock );
72 return;
73 } /* end UnlockArena() */
74
75 PR_IMPLEMENT(void) PL_InitArenaPool(
76 PLArenaPool *pool, const char *name, PRUint32 size, PRUint32 align)
77 {
78 /*
79 * Look-up table of PR_BITMASK(PR_CeilingLog2(align)) values for
80 * align = 1 to 32.
81 */
82 static const PRUint8 pmasks[33] = {
83 0, /* not used */
84 0, 1, 3, 3, 7, 7, 7, 7,15,15,15,15,15,15,15,15, /* 1 ... 16 */
85 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31}; /* 17 ... 32 */
86
87 if (align == 0)
88 align = PL_ARENA_DEFAULT_ALIGN;
89
90 if (align < sizeof(pmasks)/sizeof(pmasks[0]))
91 pool->mask = pmasks[align];
92 else
93 pool->mask = PR_BITMASK(PR_CeilingLog2(align));
94
95 pool->first.next = NULL;
96 /* Set all three addresses in pool->first to the same dummy value.
97 * These addresses are only compared with each other, but never
98 * dereferenced. */
99 pool->first.base = pool->first.avail = pool->first.limit =
100 (PRUword)PL_ARENA_ALIGN(pool, &pool->first + 1);
101 pool->current = &pool->first;
102 /*
103 * Compute the net size so that each arena's gross size is |size|.
104 * sizeof(PLArena) + pool->mask is the header and alignment slop
105 * that PL_ArenaAllocate adds to the net size.
106 */
107 if (size > sizeof(PLArena) + pool->mask)
108 pool->arenasize = size - (sizeof(PLArena) + pool->mask);
109 else
110 pool->arenasize = size;
111 #ifdef PL_ARENAMETER
112 memset(&pool->stats, 0, sizeof pool->stats);
113 pool->stats.name = strdup(name);
114 pool->stats.next = arena_stats_list;
115 arena_stats_list = &pool->stats;
116 #endif
117 }
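/*
 * [Editorial sketch, not part of the original file.] A worked example of the
 * mask setup above, assuming the usual PL_ARENA_ALIGN definition in plarena.h,
 * ((n + mask) & ~mask); the ExampleInit helper is hypothetical.
 */
static void ExampleInit(void)
{
    PLArenaPool pool;
    PL_InitArenaPool(&pool, "example", 1024, 8); /* align = 8 => pool.mask == pmasks[8] == 7 */

    /* PL_ARENA_ALIGN(&pool, 5)  == (5 + 7)  & ~7 == 8   (rounded up to a multiple of 8) */
    /* PL_ARENA_ALIGN(&pool, 16) == (16 + 7) & ~7 == 16  (already aligned, unchanged)    */

    PL_FinishArenaPool(&pool);
}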
118
119
120 /*
121 ** PL_ArenaAllocate() -- allocate space from an arena pool
122 **
123 ** Description: PL_ArenaAllocate() allocates space from an arena
124 ** pool.
125 **
126 ** First, try to satisfy the request from arenas starting at
127 ** pool->current.
128 **
129 ** If there is not enough space in the arena pool->current, try
130 ** to claim an arena, on a first fit basis, from the global
131 ** freelist (arena_freelist).
132 **
133 ** If no arena in arena_freelist is suitable, then try to
134 ** allocate a new arena from the heap.
135 **
136 ** Returns: pointer to allocated space or NULL
137 **
138 ** Notes: The original implementation had some difficult-to-solve
139 ** bugs and the code was hard to read; sometimes it's easier
140 ** just to rewrite. I did that. larryh.
141 **
142 ** See also: bugzilla: 45343.
143 **
144 */
145
146 PR_IMPLEMENT(void *) PL_ArenaAllocate(PLArenaPool *pool, PRUint32 nb)
147 {
148 PLArena *a;
149 char *rp; /* returned pointer */
150 PRUint32 nbOld;
151
152 PR_ASSERT((nb & pool->mask) == 0);
153
154 nbOld = nb;
155 nb = (PRUword)PL_ARENA_ALIGN(pool, nb); /* force alignment */
156 if (nb < nbOld)
157 return NULL;
158
159 /* attempt to allocate from arenas at pool->current */
160 {
161 a = pool->current;
162 do {
163 if ( nb <= a->limit - a->avail ) {
164 pool->current = a;
165 rp = (char *)a->avail;
166 a->avail += nb;
167 return rp;
168 }
169 } while( NULL != (a = a->next) );
170 }
171
172 /* attempt to allocate from arena_freelist */
173 {
174 PLArena *p; /* previous pointer, for unlinking from freelist */
175
176 /* lock the arena_freelist. Make access to the freelist MT-Safe */
177 if ( PR_FAILURE == LockArena())
178 return(0);
179
180 for ( a = arena_freelist, p = NULL; a != NULL ; p = a, a = a->next ) {
181 if ( nb <= a->limit - a->base ) {
182 if ( p == NULL )
183 arena_freelist = a->next;
184 else
185 p->next = a->next;
186 UnlockArena();
187 a->avail = a->base;
188 rp = (char *)a->avail;
189 a->avail += nb;
190 /* the newly allocated arena is linked after pool->current
191 * and becomes pool->current */
192 a->next = pool->current->next;
193 pool->current->next = a;
194 pool->current = a;
195 if ( NULL == pool->first.next )
196 pool->first.next = a;
197 return(rp);
198 }
199 }
200 UnlockArena();
201 }
202
203 /* attempt to allocate from the heap */
204 {
205 PRUint32 sz = PR_MAX(pool->arenasize, nb);
206 if (PR_UINT32_MAX - sz < sizeof *a + pool->mask) {
207 a = NULL;
208 } else {
209 sz += sizeof *a + pool->mask; /* header and alignment slop */
210 a = (PLArena*)PR_MALLOC(sz);
211 }
212 if ( NULL != a ) {
213 a->limit = (PRUword)a + sz;
214 a->base = a->avail = (PRUword)PL_ARENA_ALIGN(pool, a + 1);
215 PL_MAKE_MEM_NOACCESS((void*)a->avail, a->limit - a->avail);
216 rp = (char *)a->avail;
217 a->avail += nb;
218 PR_ASSERT(a->avail <= a->limit);
219 /* the newly allocated arena is linked after pool->current
220 * and becomes pool->current */
221 a->next = pool->current->next;
222 pool->current->next = a;
223 pool->current = a;
224 if ( NULL == pool->first.next )
225 pool->first.next = a;
226 PL_COUNT_ARENA(pool,++);
227 COUNT(pool, nmallocs);
228 return(rp);
229 }
230 }
231
232 /* we got here, so there is no memory to allocate */
233 return(NULL);
234 } /* --- end PL_ArenaAllocate() --- */
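/*
 * [Editorial sketch, not part of the original file.] Callers normally reach
 * PL_ArenaAllocate() through the PL_ARENA_ALLOCATE macro in plarena.h, which
 * serves the request inline from pool->current when it fits and falls back to
 * this function otherwise. The MyItem type and NewItem helper are hypothetical.
 */
typedef struct MyItem { int id; char name[32]; } MyItem;

static MyItem *NewItem(PLArenaPool *pool)
{
    MyItem *item;
    PL_ARENA_ALLOCATE(item, pool, sizeof(MyItem)); /* NULL if no arena could supply the space */
    return item; /* reclaimed later, en masse, by PL_FreeArenaPool()/PL_FinishArenaPool() */
}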
235
236 PR_IMPLEMENT(void *) PL_ArenaGrow(
237 PLArenaPool *pool, void *p, PRUint32 size, PRUint32 incr)
238 {
239 void *newp;
240
241 if (PR_UINT32_MAX - size < incr)
242 return NULL;
243 PL_ARENA_ALLOCATE(newp, pool, size + incr);
244 if (newp)
245 memcpy(newp, p, size);
246 return newp;
247 }
248
249 static void ClearArenaList(PLArena *a, PRInt32 pattern)
250 {
251
252 for (; a; a = a->next) {
253 PR_ASSERT(a->base <= a->avail && a->avail <= a->limit);
254 a->avail = a->base;
255 PL_CLEAR_UNUSED_PATTERN(a, pattern);
256 PL_MAKE_MEM_NOACCESS((void*)a->avail, a->limit - a->avail);
257 }
258 }
259
260 PR_IMPLEMENT(void) PL_ClearArenaPool(PLArenaPool *pool, PRInt32 pattern)
261 {
262 ClearArenaList(pool->first.next, pattern);
263 }
264
265 /*
266 * Free tail arenas linked after head, which may not be the true list head.
267 * Reset pool->current to point to head in case it pointed at a tail arena.
268 */
269 static void FreeArenaList(PLArenaPool *pool, PLArena *head, PRBool reallyFree)
270 {
271 PLArena **ap, *a;
272
273 ap = &head->next;
274 a = *ap;
275 if (!a)
276 return;
277
278 #ifdef DEBUG
279 ClearArenaList(a, PL_FREE_PATTERN);
280 #endif
281
282 if (reallyFree) {
283 do {
284 *ap = a->next;
285 PL_CLEAR_ARENA(a);
286 PL_COUNT_ARENA(pool,--);
287 PR_DELETE(a);
288 } while ((a = *ap) != 0);
289 } else {
290 /* Insert the whole arena chain at the front of the freelist. */
291 do {
292 PL_MAKE_MEM_NOACCESS((void*)(*ap)->base,
293 (*ap)->limit - (*ap)->base);
294 ap = &(*ap)->next;
295 } while (*ap);
296 LockArena();
297 *ap = arena_freelist;
298 arena_freelist = a;
299 head->next = 0;
300 UnlockArena();
301 }
302
303 pool->current = head;
304 }
305
306 PR_IMPLEMENT(void) PL_ArenaRelease(PLArenaPool *pool, char *mark)
307 {
308 PLArena *a;
309
310 for (a = &pool->first; a; a = a->next) {
311 if (PR_UPTRDIFF(mark, a->base) <= PR_UPTRDIFF(a->avail, a->base)) {
312 a->avail = (PRUword)PL_ARENA_ALIGN(pool, mark);
313 FreeArenaList(pool, a, PR_FALSE);
314 return;
315 }
316 }
317 }
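/*
 * [Editorial sketch, not part of the original file.] The mark/release idiom
 * served by PL_ArenaRelease(), assuming the PL_ARENA_MARK and PL_ARENA_RELEASE
 * macros from plarena.h; the UseScratch helper is hypothetical.
 */
static void UseScratch(PLArenaPool *scratch)
{
    void *mark = PL_ARENA_MARK(scratch);  /* remember the current allocation point */
    char *tmp;

    PL_ARENA_ALLOCATE(tmp, scratch, 128); /* temporary buffer, NULL on failure */
    if (tmp != NULL)
        memset(tmp, 0, 128);              /* ... work with the temporaries ... */

    PL_ARENA_RELEASE(scratch, mark);      /* allocations made after the mark are reclaimed;
                                           * whole tail arenas return to arena_freelist */
}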
318
319 PR_IMPLEMENT(void) PL_FreeArenaPool(PLArenaPool *pool)
320 {
321 FreeArenaList(pool, &pool->first, PR_FALSE);
322 COUNT(pool, ndeallocs);
323 }
324
325 PR_IMPLEMENT(void) PL_FinishArenaPool(PLArenaPool *pool)
326 {
327 FreeArenaList(pool, &pool->first, PR_TRUE);
328 #ifdef PL_ARENAMETER
329 {
330 PLArenaStats *stats, **statsp;
331
332 if (pool->stats.name)
333 PR_DELETE(pool->stats.name);
334 for (statsp = &arena_stats_list; (stats = *statsp) != 0;
335 statsp = &stats->next) {
336 if (stats == &pool->stats) {
337 *statsp = stats->next;
338 return;
339 }
340 }
341 }
342 #endif
343 }
344
345 PR_IMPLEMENT(void) PL_CompactArenaPool(PLArenaPool *ap)
346 {
347 }
348
349 PR_IMPLEMENT(void) PL_ArenaFinish(void)
350 {
351 PLArena *a, *next;
352
353 for (a = arena_freelist; a; a = next) {
354 next = a->next;
355 PR_DELETE(a);
356 }
357 arena_freelist = NULL;
358
359 if (arenaLock) {
360 PR_DestroyLock(arenaLock);
361 arenaLock = NULL;
362 }
363 once = pristineCallOnce;
364 }
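/*
 * [Editorial sketch, not part of the original file.] How the teardown entry
 * points above fit together over a pool's lifetime; "session" is an arbitrary
 * name and ExampleLifetime is hypothetical.
 */
static void ExampleLifetime(void)
{
    PLArenaPool pool;
    PL_InitArenaPool(&pool, "session", 4096, 0); /* 0 selects PL_ARENA_DEFAULT_ALIGN */

    /* ... allocate from the pool with PL_ARENA_ALLOCATE ... */

    PL_FreeArenaPool(&pool);   /* arenas move to arena_freelist; the pool can be refilled */
    PL_FinishArenaPool(&pool); /* arenas are returned to the heap; meter stats unlinked   */
    PL_ArenaFinish();          /* at shutdown: drain arena_freelist and destroy arenaLock */
}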
365
366 PR_IMPLEMENT(size_t) PL_SizeOfArenaPoolExcludingPool(
367 const PLArenaPool *pool, PLMallocSizeFn mallocSizeOf)
368 {
369 /*
370 * The first PLArena is within |pool|, so don't measure it. Subsequent
371 * PLArenas are separate and must be measured.
372 */
373 size_t size = 0;
374 const PLArena *arena = pool->first.next;
375 while (arena) {
376 size += mallocSizeOf(arena);
377 arena = arena->next;
378 }
379 return size;
380 }
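/*
 * [Editorial sketch, not part of the original file.] A possible caller, e.g. a
 * memory reporter. PLMallocSizeFn is assumed to be the size_t (*)(const void *)
 * callback type declared in plarena.h, and malloc_usable_size() (from
 * <malloc.h>) is a glibc-specific way to obtain a heap block's size; other
 * platforms use malloc_size() or an equivalent.
 */
#include <malloc.h> /* malloc_usable_size(); glibc only */

static size_t ExampleMallocSizeOf(const void *ptr)
{
    return ptr ? malloc_usable_size((void *)ptr) : 0;
}

static size_t ReportPoolHeapBytes(const PLArenaPool *pool)
{
    /* counts only the separately malloc'd arenas, not |pool| itself */
    return PL_SizeOfArenaPoolExcludingPool(pool, ExampleMallocSizeOf);
}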
381
382 #ifdef PL_ARENAMETER
383 PR_IMPLEMENT(void) PL_ArenaCountAllocation(PLArenaPool *pool, PRUint32 nb)
384 {
385 pool->stats.nallocs++;
386 pool->stats.nbytes += nb;
387 if (nb > pool->stats.maxalloc)
388 pool->stats.maxalloc = nb;
389 pool->stats.variance += nb * nb;
390 }
391
392 PR_IMPLEMENT(void) PL_ArenaCountInplaceGrowth(
393 PLArenaPool *pool, PRUint32 size, PRUint32 incr)
394 {
395 pool->stats.ninplace++;
396 }
397
398 PR_IMPLEMENT(void) PL_ArenaCountGrowth(
399 PLArenaPool *pool, PRUint32 size, PRUint32 incr)
400 {
401 pool->stats.ngrows++;
402 pool->stats.nbytes += incr;
403 pool->stats.variance -= size * size;
404 size += incr;
405 if (size > pool->stats.maxalloc)
406 pool->stats.maxalloc = size;
407 pool->stats.variance += size * size;
408 }
409
410 PR_IMPLEMENT(void) PL_ArenaCountRelease(PLArenaPool *pool, char *mark)
411 {
412 pool->stats.nreleases++;
413 }
414
415 PR_IMPLEMENT(void) PL_ArenaCountRetract(PLArenaPool *pool, char *mark)
416 {
417 pool->stats.nfastrels++;
418 }
419
420 #include <math.h>
421 #include <stdio.h>
422
423 PR_IMPLEMENT(void) PL_DumpArenaStats(FILE *fp)
424 {
425 PLArenaStats *stats;
426 double mean, variance;
427
428 for (stats = arena_stats_list; stats; stats = stats->next) {
429 if (stats->nallocs != 0) {
430 mean = (double)stats->nbytes / stats->nallocs;
431 variance = fabs(stats->variance / stats->nallocs - mean * mean);
432 } else {
433 mean = variance = 0;
434 }
435
436 fprintf(fp, "\n%s allocation statistics:\n", stats->name);
437 fprintf(fp, " number of arenas: %u\n", stats->narenas);
438 fprintf(fp, " number of allocations: %u\n", stats->nallocs);
439 fprintf(fp, " number of free arena reclaims: %u\n", stats->nreclaims);
440 fprintf(fp, " number of malloc calls: %u\n", stats->nmallocs);
441 fprintf(fp, " number of deallocations: %u\n", stats->ndeallocs);
442 fprintf(fp, " number of allocation growths: %u\n", stats->ngrows);
443 fprintf(fp, " number of in-place growths: %u\n", stats->ninplace);
444 fprintf(fp, "number of released allocations: %u\n", stats->nreleases);
445 fprintf(fp, " number of fast releases: %u\n", stats->nfastrels);
446 fprintf(fp, " total bytes allocated: %u\n", stats->nbytes);
447 fprintf(fp, " mean allocation size: %g\n", mean);
448 fprintf(fp, " standard deviation: %g\n", sqrt(variance));
449 fprintf(fp, " maximum allocation size: %u\n", stats->maxalloc);
450 }
451 }
452 #endif /* PL_ARENAMETER */