/*
 * Copyright 2008 The Native Client Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can
 * be found in the LICENSE file.
 */

/*
 * NaCl Generic containers.
 */
#include "native_client/src/include/portability.h"
#include <stdio.h>
#include <stdlib.h>

#include "native_client/src/trusted/service_runtime/generic_container/container.h"

static struct NaClContainerVtbl const kNaClContainerListVtbl = {
  NaClContainerListInsert,
  NaClContainerListFind,
  NaClContainerListDtor,
  sizeof(struct NaClContainerListIter),
  NaClContainerListIterCtor,
};

/*
 * cmp_functor is the comparison functor; it is always passed to its
 * comparison member function as the first, "self" (or more commonly
 * "this"), argument.  Arguably we should create an explicit object
 * with a vtable containing the single comparison function pointer,
 * but this is so simple that that seems overkill.
 */
int NaClContainerListCtor(struct NaClContainerList *self,
                          struct NaClCmpFunctor *cmp_functor) {
  self->cmp_functor = cmp_functor;
  self->head = 0;

  self->base.vtbl = &kNaClContainerListVtbl;
  return 1;
}
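
/*
 * Usage sketch (illustrative): constructing a list with a caller
 * supplied comparison functor.  The vtable type name
 * NaClCmpFunctorVtbl, its exact layout, and the identifiers
 * beginning with My and my_ are hypothetical; the real declarations
 * live in container.h.  Note that the container takes ownership of
 * inserted data: NaClContainerListDtor and the iterator Erase
 * function free() each datum, so inserted objects should be heap
 * allocated.
 *
 *   static int MyIntOrderCmp(struct NaClCmpFunctor *vself,
 *                            void *left, void *right) {
 *     int a = *(int *) left;
 *     int b = *(int *) right;
 *     return (a > b) - (a < b);
 *   }
 *   static struct NaClCmpFunctorVtbl const kMyIntCmpVtbl = {
 *     MyIntOrderCmp,
 *   };
 *   static struct NaClCmpFunctor my_cmp = { &kMyIntCmpVtbl };
 *
 *   struct NaClContainerList cl;
 *   int *p = malloc(sizeof *p);
 *
 *   *p = 42;
 *   if (!NaClContainerListCtor(&cl, &my_cmp)) abort();
 *   if (!NaClContainerListInsert((struct NaClContainer *) &cl, p)) abort();
 *   NaClContainerListDtor((struct NaClContainer *) &cl);
 */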


int NaClContainerListIterAtEnd(struct NaClContainerIter *vself) {
  struct NaClContainerListIter *self = (struct NaClContainerListIter *) vself;

  return *self->cur == 0;
}


void *NaClContainerListIterStar(struct NaClContainerIter *vself) {
  struct NaClContainerListIter *self = (struct NaClContainerListIter *) vself;

  return (*self->cur)->datum;
}


void NaClContainerListIterIncr(struct NaClContainerIter *vself) {
  struct NaClContainerListIter *self = (struct NaClContainerListIter *) vself;

  self->cur = &(*self->cur)->next;
}


void NaClContainerListIterErase(struct NaClContainerIter *vself) {
  struct NaClContainerListIter *self = (struct NaClContainerListIter *) vself;
  struct NaClItemList *doomed = *self->cur;
  struct NaClItemList *next = (*self->cur)->next;

  *self->cur = next;
  free(doomed->datum);
  free(doomed);
}


void *NaClContainerListIterExtract(struct NaClContainerIter *vself) {
  struct NaClContainerListIter *self = (struct NaClContainerListIter *) vself;
  struct NaClItemList *doomed = *self->cur;
  struct NaClItemList *next = (*self->cur)->next;
  void *datum;

  *self->cur = next;
  datum = doomed->datum;
  free(doomed);
  return datum;
}

static struct NaClContainerIterVtbl const kNaClContainerListIterVtbl = {
  NaClContainerListIterAtEnd,
  NaClContainerListIterStar,
  NaClContainerListIterIncr,
  NaClContainerListIterErase,
  NaClContainerListIterExtract,
};


int NaClContainerListInsert(struct NaClContainer *vself,
                            void *obj) {
  struct NaClContainerList *self = (struct NaClContainerList *) vself;
  struct NaClItemList *new_entry = malloc(sizeof *new_entry);
  struct NaClItemList **ipp;
  struct NaClItemList *ip;

  if (!new_entry)
    return 0;

  new_entry->datum = obj;

  for (ipp = &self->head; 0 != (ip = *ipp); ipp = &ip->next) {
    if ((*self->cmp_functor->vtbl->OrderCmp)(self->cmp_functor,
                                             obj, ip->datum) >= 0) {
      break;
    }
  }

  new_entry->next = ip;
  *ipp = new_entry;
  return 1;
}
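
/*
 * Note on the insertion loop above: ipp always points at the link
 * that will be rewritten -- initially &self->head, thereafter
 * &previous_entry->next -- so the new entry is spliced in with the
 * single assignment "*ipp = new_entry", whether the insertion point
 * is the head, the middle, or the end of the list.  For example, if
 * the loop stops with ip pointing at node B in the list A -> B, then
 * ipp == &A->next, new_entry->next becomes B, and A->next becomes
 * new_entry, yielding A -> new_entry -> B.
 */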


struct NaClContainerIter *NaClContainerListFind(
    struct NaClContainer *vself,
    void *key,
    struct NaClContainerIter *vout) {
  struct NaClContainerList *self = (struct NaClContainerList *) vself;
  struct NaClItemList **ipp;
  struct NaClItemList *ip;
  struct NaClContainerListIter *out = (struct NaClContainerListIter *) vout;

  for (ipp = &self->head; (ip = *ipp) != 0; ipp = &ip->next) {
    if ((*self->cmp_functor->vtbl->OrderCmp)(self->cmp_functor,
                                             key, ip->datum) == 0) {
      break;
    }
  }
  out->cur = ipp;

  vout->vtbl = &kNaClContainerListIterVtbl;
  return vout;
}


void NaClContainerListDtor(struct NaClContainer *vself) {
  struct NaClContainerList *self = (struct NaClContainerList *) vself;
  struct NaClItemList *cur, *next;

  for (cur = self->head; cur; cur = next) {
    next = cur->next;
    free(cur->datum);
    free(cur);
  }
}


int NaClContainerListIterCtor(struct NaClContainer *vself,
                              struct NaClContainerIter *viter) {
  struct NaClContainerList *self = (struct NaClContainerList *) vself;
  struct NaClContainerListIter *iter = (struct NaClContainerListIter *) viter;

  iter->cur = &self->head;

  viter->vtbl = &kNaClContainerListIterVtbl;
  return 1;
}
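
/*
 * Iteration sketch (illustrative): walking a list "cl" (as in the
 * construction sketch above) with the iterator functions defined in
 * this file; the int element type is hypothetical.
 *
 *   struct NaClContainerListIter iter;
 *   struct NaClContainerIter *it = (struct NaClContainerIter *) &iter;
 *
 *   NaClContainerListIterCtor((struct NaClContainer *) &cl, it);
 *   while (!NaClContainerListIterAtEnd(it)) {
 *     int *elt = NaClContainerListIterStar(it);
 *     printf("%d\n", *elt);
 *     NaClContainerListIterIncr(it);
 *   }
 */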


static struct NaClContainerVtbl const kNaClContainerHashTblVtbl = {
  NaClContainerHashTblInsert,
  NaClContainerHashTblFind,
  NaClContainerHashTblDtor,
  sizeof(struct NaClContainerHashTblIter),
  NaClContainerHashTblIterCtor,
};

#define HASH_TABLE_LOAD_FACTOR 2
#define HASH_TABLE_STRIDE 17
/*
 * The stride p is prime, so it is co-prime with any num_bucket value
 * k (except for multiples of p), and thus linear probing by
 * increments of the stride will visit every bucket.  (I.e., the
 * stride value is a generator for the additive group Z_k^+, where
 * k = num_bucket.)  Assume that (k, p) = 1.
 *
 * That (p, +) is a generator is another way of saying that
 * 0, p, 2p, 3p, ..., (k-1)p (mod k) is a permutation of
 * 0, 1, ..., k-1.
 *
 * Assume otherwise, so (p, +) generates a proper subgroup of Z_k^+.
 * Let n be the least integer greater than 0 with np = 0 mod k; since
 * (p) does not generate all of Z_k^+, we have 0 < n < k.  Then
 * np = mk for some integer m > 0.  Since p is prime, p does not
 * divide k, and p divides mk, p must divide m.  Write m = pq.  Then
 * np = pqk, so n = qk >= k, contradicting n < k.
 *
 * The co-primality of the stride and the number of buckets is a
 * precondition for the insertion algorithm terminating.  (Another
 * precondition is that there is an unused slot.)
 *
 * We expand the hash table by doubling the number of buckets.  Thus,
 * if num_bucket starts off co-prime with the stride, it will remain
 * so.
 */
#define HASH_TABLE_MIN_BUCKETS 32
/*
 * HASH_TABLE_MIN_BUCKETS must be greater than HASH_TABLE_STRIDE and
 * not a multiple of it, so that the two are co-prime.
 */
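
/*
 * Concrete illustration: with num_bucket = 32 and stride = 17,
 * gcd(32, 17) = 1 and the probe sequence starting at bucket 0 is
 * 0, 17, 2, 19, 4, 21, 6, 23, 8, ..., visiting all 32 buckets before
 * returning to 0.  A small (hypothetical) check of the permutation
 * property -- it returns 0 if the sequence cycles back to bucket 0
 * early (i.e., gcd(k, p) > 1) and 1 if all k buckets are visited:
 *
 *   static int VisitsEveryBucket(size_t k, size_t p) {
 *     size_t idx = 0;
 *     size_t step;
 *     for (step = 1; step < k; ++step) {
 *       idx = (idx + p) % k;
 *       if (0 == idx) {
 *         return 0;
 *       }
 *     }
 *     return 1;
 *   }
 */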

static struct NaClContainerHashTblEntry *NaClContainerHashTblMakeBucket(
    size_t size) {
  struct NaClContainerHashTblEntry *bucket;

  if (SIZE_T_MAX / sizeof *bucket < size) {
    return NULL;
  }
  bucket = calloc(size, sizeof *bucket);
  return bucket;
}

/*
 * hash_functor is the comparison and hash functor.  It is always
 * passed as the first argument to its comparison and hash member
 * functions, to allow those functions to use its members to
 * determine how to compare/hash.  Arguably we should define a class
 * with a vtable containing two virtual functions, but we don't
 * expect there to be many object instances.
 */
int NaClContainerHashTblCtor(struct NaClContainerHashTbl *self,
                             struct NaClHashFunctor *hash_functor,
                             size_t num_buckets) {
  int success;

  self->base.vtbl = (struct NaClContainerVtbl *) NULL;

  /* silently increase the number of buckets if it is too small */
  if (num_buckets < HASH_TABLE_MIN_BUCKETS) {
    num_buckets = HASH_TABLE_MIN_BUCKETS;
  }
  /* if num_buckets is a multiple of the stride, silently increase it */
  if (0 == (num_buckets % HASH_TABLE_STRIDE)) {
    ++num_buckets;
  }

  self->hash_functor = hash_functor;
  self->num_buckets = num_buckets;
  self->num_entries = 0;
  self->bucket = NaClContainerHashTblMakeBucket(num_buckets);

  success = self->bucket != 0;

  if (success) {
    self->base.vtbl = &kNaClContainerHashTblVtbl;
  }
  return success;
}
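
/*
 * Construction sketch (illustrative): the hash functor's vtbl must
 * supply both base.OrderCmp and Hash, as used by the Insert and Find
 * methods below.  The vtable type name NaClHashFunctorVtbl, its
 * exact layout, and the identifiers beginning with My and my_ are
 * hypothetical; the real declarations live in container.h.  The
 * base member here reuses MyIntOrderCmp from the list sketch above.
 *
 *   static uintptr_t MyIntHash(struct NaClHashFunctor *vself,
 *                              void *obj) {
 *     return (uintptr_t) *(int *) obj;
 *   }
 *   static struct NaClHashFunctorVtbl const kMyHashVtbl = {
 *     { MyIntOrderCmp },
 *     MyIntHash,
 *   };
 *   static struct NaClHashFunctor my_hash = { &kMyHashVtbl };
 *
 *   struct NaClContainerHashTbl ht;
 *   if (!NaClContainerHashTblCtor(&ht, &my_hash, 64)) abort();
 */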


int NaClContainerHashTblCheckLoad(struct NaClContainerHashTbl *self) {
  struct NaClContainerHashTblEntry *entry;
  struct NaClContainerHashTblEntry *new_table;
  struct NaClContainerHashTblEntry *old_table;
  size_t new_size;
  size_t old_size;
  size_t i;

  /*
   * Check the load factor.  If the load reaches the threshold,
   * double the number of buckets and rehash.
   */
  if (HASH_TABLE_LOAD_FACTOR * self->num_entries < self->num_buckets)
    return 1;

  old_size = self->num_buckets;
  new_size = 2 * old_size;
  /*
   * Arithmetic overflow: this should not happen in practice, since
   * we would normally run out of memory long before the bucket count
   * could wrap around.
   */
  if (new_size < old_size)
    return 0;

  new_table = NaClContainerHashTblMakeBucket(new_size);
  if (!new_table)
    return 0;

  old_table = self->bucket;
  self->bucket = new_table;

  self->num_buckets = new_size;
  self->num_entries = 0;

  /*
   * The rehash/transfer to the new, expanded table should not fail,
   * since the Insert method (below) does not fail.
   */
  for (i = 0; i < old_size; i++) {
    entry = &old_table[i];
    if (NACL_CHTE_USED != entry->flags) {
      continue;
    }
    (*self->base.vtbl->Insert)((struct NaClContainer *) self,
                               entry->datum);
  }

  free(old_table);
  return 1;
}
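
/*
 * With HASH_TABLE_LOAD_FACTOR == 2, the check above triggers growth
 * as soon as 2 * num_entries >= num_buckets, i.e. once the table is
 * at least half full; doubling the bucket count then brings the load
 * back down to roughly one quarter.
 */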


static uintptr_t HashNext(uintptr_t idx,
                          size_t modulus) {
  idx += HASH_TABLE_STRIDE;
  while (idx >= modulus) {
    idx -= modulus;
  }
  return idx;
}


int NaClContainerHashTblInsert(struct NaClContainer *vself,
                               void *obj) {
  struct NaClContainerHashTbl *self = (struct NaClContainerHashTbl *) vself;
  uintptr_t idx;

  for (idx = (*self->hash_functor->vtbl->Hash)(self->hash_functor,
                                               obj) % self->num_buckets;
       NACL_CHTE_USED == self->bucket[idx].flags;
       idx = HashNext(idx, self->num_buckets)) {
  }
  /* unused or deleted */
  self->bucket[idx].datum = obj;
  self->bucket[idx].flags = NACL_CHTE_USED;
  ++self->num_entries;

  return NaClContainerHashTblCheckLoad(self);
}


/* fwd */
static struct NaClContainerIterVtbl const kNaClContainerHashTblIterVtbl;


struct NaClContainerIter *NaClContainerHashTblFind(
    struct NaClContainer *vself,
    void *key,
    struct NaClContainerIter *vout) {
  struct NaClContainerHashTbl *self
      = (struct NaClContainerHashTbl *) vself;
  uintptr_t idx;
  struct NaClContainerHashTblIter *out
      = (struct NaClContainerHashTblIter *) vout;

  for (idx = (*self->hash_functor->vtbl->Hash)(self->hash_functor,
                                               key) % self->num_buckets;
       0 != self->bucket[idx].flags;
       idx = HashNext(idx, self->num_buckets)) {
    if (0 != (self->bucket[idx].flags & NACL_CHTE_USED)
        && ((*self->hash_functor->vtbl->base.OrderCmp)
            ((struct NaClCmpFunctor *) self->hash_functor,
             self->bucket[idx].datum, key)) == 0) {
      break;
    }
  }
  if (0 == self->bucket[idx].flags) {
    /* not found */
    idx = self->num_buckets;
  }

  out->htbl = self;
  out->idx = idx;

  vout->vtbl = &kNaClContainerHashTblIterVtbl;
  return vout;
}


void NaClContainerHashTblDtor(struct NaClContainer *vself) {
  struct NaClContainerHashTbl *self = (struct NaClContainerHashTbl *) vself;
  unsigned int idx;

  for (idx = 0; idx < self->num_buckets; ++idx) {
    if (self->bucket[idx].flags & NACL_CHTE_USED) {
      free(self->bucket[idx].datum);
    }
  }
  free(self->bucket);
  self->bucket = 0;
}


int NaClContainerHashTblIterAtEnd(struct NaClContainerIter *vself) {
  struct NaClContainerHashTblIter *self
      = (struct NaClContainerHashTblIter *) vself;

  return self->idx >= self->htbl->num_buckets;
}


void *NaClContainerHashTblIterStar(struct NaClContainerIter *vself) {
  struct NaClContainerHashTblIter *self
      = (struct NaClContainerHashTblIter *) vself;

  return self->htbl->bucket[self->idx].datum;
}


void NaClContainerHashTblIterIncr(struct NaClContainerIter *vself) {
  struct NaClContainerHashTblIter *self
      = (struct NaClContainerHashTblIter *) vself;

  while (++self->idx < self->htbl->num_buckets) {
    if (self->htbl->bucket[self->idx].flags == NACL_CHTE_USED)
      return;
  }
  /* self->idx == self->htbl->num_buckets implies AtEnd */
  return;
}


void NaClContainerHashTblIterErase(struct NaClContainerIter *vself) {
  struct NaClContainerHashTblIter *self
      = (struct NaClContainerHashTblIter *) vself;

  if (self->htbl->bucket[self->idx].flags == NACL_CHTE_USED) {
    --self->htbl->num_entries;
  }
  self->htbl->bucket[self->idx].flags = NACL_CHTE_DELETED;
  free(self->htbl->bucket[self->idx].datum);
  self->htbl->bucket[self->idx].datum = 0;
  (*vself->vtbl->Incr)(vself);
}


void *NaClContainerHashTblIterExtract(struct NaClContainerIter *vself) {
  struct NaClContainerHashTblIter *self
      = (struct NaClContainerHashTblIter *) vself;
  void *datum;

  if (self->htbl->bucket[self->idx].flags == NACL_CHTE_USED) {
    --self->htbl->num_entries;
  }
  self->htbl->bucket[self->idx].flags = NACL_CHTE_DELETED;
  datum = self->htbl->bucket[self->idx].datum;
  self->htbl->bucket[self->idx].datum = 0;
  (*vself->vtbl->Incr)(vself);

  return datum;
}


static struct NaClContainerIterVtbl const kNaClContainerHashTblIterVtbl = {
  NaClContainerHashTblIterAtEnd,
  NaClContainerHashTblIterStar,
  NaClContainerHashTblIterIncr,
  NaClContainerHashTblIterErase,
  NaClContainerHashTblIterExtract,
};


int NaClContainerHashTblIterCtor(struct NaClContainer *vself,
                                 struct NaClContainerIter *viter) {
  struct NaClContainerHashTbl *self
      = (struct NaClContainerHashTbl *) vself;
  struct NaClContainerHashTblIter *iter
      = (struct NaClContainerHashTblIter *) viter;

  viter->vtbl = &kNaClContainerHashTblIterVtbl;
  iter->htbl = self;
  iter->idx = 0;
  if (0 == (self->bucket[0].flags & NACL_CHTE_USED)) {
    NaClContainerHashTblIterIncr(viter);
  }
  return 1;
}
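
/*
 * Lookup sketch (illustrative), continuing the hypothetical "ht"
 * from the construction sketch above: Find positions the iterator at
 * the matching entry, or at the end if the key is absent, and
 * Extract removes the entry and returns ownership of the datum to
 * the caller.
 *
 *   struct NaClContainerHashTblIter hti;
 *   struct NaClContainerIter *it = (struct NaClContainerIter *) &hti;
 *   int key = 42;
 *   int *found;
 *
 *   NaClContainerHashTblFind((struct NaClContainer *) &ht, &key, it);
 *   if (!NaClContainerHashTblIterAtEnd(it)) {
 *     found = NaClContainerHashTblIterExtract(it);
 *     free(found);
 *   }
 */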