Chromium Code Reviews

Side by Side Diff: third_party/grpc/src/core/transport/metadata.c

Issue 1932353002: Initial checkin of gRPC to third_party/
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 4 years, 7 months ago
1 /*
2 *
3 * Copyright 2015-2016, Google Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above
13 * copyright notice, this list of conditions and the following disclaimer
14 * in the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Google Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 */
33
34 #include "src/core/transport/metadata.h"
35
36 #include <assert.h>
37 #include <stddef.h>
38 #include <string.h>
39
40 #include <grpc/compression.h>
41 #include <grpc/support/alloc.h>
42 #include <grpc/support/atm.h>
43 #include <grpc/support/log.h>
44 #include <grpc/support/string_util.h>
45 #include <grpc/support/time.h>
46
47 #include "src/core/profiling/timers.h"
48 #include "src/core/support/murmur_hash.h"
49 #include "src/core/support/string.h"
50 #include "src/core/transport/chttp2/bin_encoder.h"
51 #include "src/core/transport/static_metadata.h"
52 #include "src/core/iomgr/iomgr_internal.h"
53
54 /* There are two kinds of mdelem and mdstr instances.
55 * Static instances are declared in static_metadata.{h,c} and
56 * are initialized by grpc_mdctx_global_init().
57 * Dynamic instances are stored in hash tables on grpc_mdctx, and are backed
58 * by internal_string and internal_element structures.
59 * Internal helper functions herein (is_mdstr_static, is_mdelem_static) are
60 * used to determine which kind of element a pointer refers to.
61 */
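/* Illustrative usage sketch (assumes only grpc_mdelem_from_strings defined
 * below and the GRPC_MDELEM_UNREF macro declared in metadata.h):
 *
 *   grpc_mdelem *a = grpc_mdelem_from_strings("user-agent", "example");
 *   grpc_mdelem *b = grpc_mdelem_from_strings("user-agent", "example");
 *   GPR_ASSERT(a == b);     interning: identical bytes yield one element
 *   GRPC_MDELEM_UNREF(a);
 *   GRPC_MDELEM_UNREF(b);
 */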
62
63 #define INITIAL_STRTAB_CAPACITY 4
64 #define INITIAL_MDTAB_CAPACITY 4
65
66 #ifdef GRPC_METADATA_REFCOUNT_DEBUG
67 #define DEBUG_ARGS , const char *file, int line
68 #define FWD_DEBUG_ARGS , file, line
69 #define REF_MD_LOCKED(shard, s) ref_md_locked((shard), (s), __FILE__, __LINE__)
70 #else
71 #define DEBUG_ARGS
72 #define FWD_DEBUG_ARGS
73 #define REF_MD_LOCKED(shard, s) ref_md_locked((shard), (s))
74 #endif
75
76 #define TABLE_IDX(hash, log2_shards, capacity) \
77 (((hash) >> (log2_shards)) % (capacity))
78 #define SHARD_IDX(hash, log2_shards) ((hash) & ((1 << (log2_shards)) - 1))
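/* Worked example (illustrative): with LOG2_STRTAB_SHARD_COUNT == 5 and a
   shard capacity of 4, hash 0xdeadbeef selects shard 0xdeadbeef & 31 == 15,
   and within that shard bucket (0xdeadbeef >> 5) % 4 == 3. SHARD_IDX consumes
   the low log2_shards bits and TABLE_IDX the bits above them, so the bucket
   index never re-uses the bits that picked the shard. */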
79
80 typedef void (*destroy_user_data_func)(void *user_data);
81
82 /* Shadow structure for grpc_mdstr for non-static values */
83 typedef struct internal_string {
84 /* must be byte compatible with grpc_mdstr */
85 gpr_slice slice;
86 uint32_t hash;
87
88 /* private only data */
89 gpr_atm refcnt;
90
91 uint8_t has_base64_and_huffman_encoded;
92 gpr_slice_refcount refcount;
93
94 gpr_slice base64_and_huffman;
95
96 struct internal_string *bucket_next;
97 } internal_string;
98
99 /* Shadow structure for grpc_mdelem for non-static elements */
100 typedef struct internal_metadata {
101 /* must be byte compatible with grpc_mdelem */
102 internal_string *key;
103 internal_string *value;
104
105 /* private only data */
106 gpr_atm refcnt;
107
108 gpr_mu mu_user_data;
109 gpr_atm destroy_user_data;
110 gpr_atm user_data;
111
112 struct internal_metadata *bucket_next;
113 } internal_metadata;
114
115 typedef struct strtab_shard {
116 gpr_mu mu;
117 internal_string **strs;
118 size_t count;
119 size_t capacity;
120 } strtab_shard;
121
122 typedef struct mdtab_shard {
123 gpr_mu mu;
124 internal_metadata **elems;
125 size_t count;
126 size_t capacity;
127 size_t free;
128 } mdtab_shard;
129
130 #define LOG2_STRTAB_SHARD_COUNT 5
131 #define LOG2_MDTAB_SHARD_COUNT 4
132 #define STRTAB_SHARD_COUNT ((size_t)(1 << LOG2_STRTAB_SHARD_COUNT))
133 #define MDTAB_SHARD_COUNT ((size_t)(1 << LOG2_MDTAB_SHARD_COUNT))
134
135 /* hash seed: decided at initialization time */
136 static uint32_t g_hash_seed;
137 static int g_forced_hash_seed = 0;
138
139 /* linearly probed hash tables for static element lookup */
140 static grpc_mdstr *g_static_strtab[GRPC_STATIC_MDSTR_COUNT * 2];
141 static grpc_mdelem *g_static_mdtab[GRPC_STATIC_MDELEM_COUNT * 2];
142 static size_t g_static_strtab_maxprobe;
143 static size_t g_static_mdtab_maxprobe;
144
145 static strtab_shard g_strtab_shard[STRTAB_SHARD_COUNT];
146 static mdtab_shard g_mdtab_shard[MDTAB_SHARD_COUNT];
147
148 static void gc_mdtab(mdtab_shard *shard);
149
150 void grpc_test_only_set_metadata_hash_seed(uint32_t seed) {
151 g_hash_seed = seed;
152 g_forced_hash_seed = 1;
153 }
154
155 void grpc_mdctx_global_init(void) {
156 size_t i, j;
157 if (!g_forced_hash_seed) {
158 g_hash_seed = (uint32_t)gpr_now(GPR_CLOCK_REALTIME).tv_nsec;
159 }
160 g_static_strtab_maxprobe = 0;
161 g_static_mdtab_maxprobe = 0;
162 /* build static tables */
163 memset(g_static_mdtab, 0, sizeof(g_static_mdtab));
164 memset(g_static_strtab, 0, sizeof(g_static_strtab));
165 for (i = 0; i < GRPC_STATIC_MDSTR_COUNT; i++) {
166 grpc_mdstr *elem = &grpc_static_mdstr_table[i];
167 const char *str = grpc_static_metadata_strings[i];
168 uint32_t hash = gpr_murmur_hash3(str, strlen(str), g_hash_seed);
169 *(gpr_slice *)&elem->slice = gpr_slice_from_static_string(str);
170 *(uint32_t *)&elem->hash = hash;
171 for (j = 0;; j++) {
172 size_t idx = (hash + j) % GPR_ARRAY_SIZE(g_static_strtab);
173 if (g_static_strtab[idx] == NULL) {
174 g_static_strtab[idx] = &grpc_static_mdstr_table[i];
175 break;
176 }
177 }
178 if (j > g_static_strtab_maxprobe) {
179 g_static_strtab_maxprobe = j;
180 }
181 }
182 for (i = 0; i < GRPC_STATIC_MDELEM_COUNT; i++) {
183 grpc_mdelem *elem = &grpc_static_mdelem_table[i];
184 grpc_mdstr *key =
185 &grpc_static_mdstr_table[grpc_static_metadata_elem_indices[2 * i + 0]];
186 grpc_mdstr *value =
187 &grpc_static_mdstr_table[grpc_static_metadata_elem_indices[2 * i + 1]];
188 uint32_t hash = GRPC_MDSTR_KV_HASH(key->hash, value->hash);
189 *(grpc_mdstr **)&elem->key = key;
190 *(grpc_mdstr **)&elem->value = value;
191 for (j = 0;; j++) {
192 size_t idx = (hash + j) % GPR_ARRAY_SIZE(g_static_mdtab);
193 if (g_static_mdtab[idx] == NULL) {
194 g_static_mdtab[idx] = elem;
195 break;
196 }
197 }
198 if (j > g_static_mdtab_maxprobe) {
199 g_static_mdtab_maxprobe = j;
200 }
201 }
202 /* initialize shards */
203 for (i = 0; i < STRTAB_SHARD_COUNT; i++) {
204 strtab_shard *shard = &g_strtab_shard[i];
205 gpr_mu_init(&shard->mu);
206 shard->count = 0;
207 shard->capacity = INITIAL_STRTAB_CAPACITY;
208 shard->strs = gpr_malloc(sizeof(*shard->strs) * shard->capacity);
209 memset(shard->strs, 0, sizeof(*shard->strs) * shard->capacity);
210 }
211 for (i = 0; i < MDTAB_SHARD_COUNT; i++) {
212 mdtab_shard *shard = &g_mdtab_shard[i];
213 gpr_mu_init(&shard->mu);
214 shard->count = 0;
215 shard->free = 0;
216 shard->capacity = INITIAL_MDTAB_CAPACITY;
217 shard->elems = gpr_malloc(sizeof(*shard->elems) * shard->capacity);
218 memset(shard->elems, 0, sizeof(*shard->elems) * shard->capacity);
219 }
220 }
221
222 void grpc_mdctx_global_shutdown(void) {
223 size_t i;
224 for (i = 0; i < MDTAB_SHARD_COUNT; i++) {
225 mdtab_shard *shard = &g_mdtab_shard[i];
226 gpr_mu_destroy(&shard->mu);
227 gc_mdtab(shard);
228 /* TODO(ctiller): GPR_ASSERT(shard->count == 0); */
229 if (shard->count != 0) {
230 gpr_log(GPR_DEBUG, "WARNING: %d metadata elements were leaked",
231 (int)shard->count);
232 if (grpc_iomgr_abort_on_leaks()) {
233 abort();
234 }
235 }
236 gpr_free(shard->elems);
237 }
238 for (i = 0; i < STRTAB_SHARD_COUNT; i++) {
239 strtab_shard *shard = &g_strtab_shard[i];
240 gpr_mu_destroy(&shard->mu);
241 /* TODO(ctiller): GPR_ASSERT(shard->count == 0); */
242 if (shard->count != 0) {
243 gpr_log(GPR_DEBUG, "WARNING: %d metadata strings were leaked",
244 (int)shard->count);
245 if (grpc_iomgr_abort_on_leaks()) {
246 abort();
247 }
248 }
249 gpr_free(shard->strs);
250 }
251 }
252
253 static int is_mdstr_static(grpc_mdstr *s) {
254 return s >= &grpc_static_mdstr_table[0] &&
255 s < &grpc_static_mdstr_table[GRPC_STATIC_MDSTR_COUNT];
256 }
257
258 static int is_mdelem_static(grpc_mdelem *e) {
259 return e >= &grpc_static_mdelem_table[0] &&
260 e < &grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT];
261 }
262
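/* Refcounts on dynamic elements carry a +1 bias while the element sits in its
   mdtab shard: a freshly interned element starts at 2 (one caller reference
   plus the bias), 1 means "no external references", and 0 marks the element
   as reclaimable by gc_mdtab() and counted in shard->free. ref_md_locked()
   adds 2 and then, unless it just revived a reclaimable element (old value
   was 0), takes one back, so the net effect is a single new reference. */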
263 static void ref_md_locked(mdtab_shard *shard,
264 internal_metadata *md DEBUG_ARGS) {
265 #ifdef GRPC_METADATA_REFCOUNT_DEBUG
266 gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
267 "ELM REF:%p:%d->%d: '%s' = '%s'", md,
268 gpr_atm_no_barrier_load(&md->refcnt),
269 gpr_atm_no_barrier_load(&md->refcnt) + 1,
270 grpc_mdstr_as_c_string((grpc_mdstr *)md->key),
271 grpc_mdstr_as_c_string((grpc_mdstr *)md->value));
272 #endif
273 if (0 == gpr_atm_no_barrier_fetch_add(&md->refcnt, 2)) {
274 shard->free--;
275 } else {
276 GPR_ASSERT(1 != gpr_atm_no_barrier_fetch_add(&md->refcnt, -1));
277 }
278 }
279
280 static void grow_strtab(strtab_shard *shard) {
281 size_t capacity = shard->capacity * 2;
282 size_t i;
283 internal_string **strtab;
284 internal_string *s, *next;
285
286 GPR_TIMER_BEGIN("grow_strtab", 0);
287
288 strtab = gpr_malloc(sizeof(internal_string *) * capacity);
289 memset(strtab, 0, sizeof(internal_string *) * capacity);
290
291 for (i = 0; i < shard->capacity; i++) {
292 for (s = shard->strs[i]; s; s = next) {
293 size_t idx = TABLE_IDX(s->hash, LOG2_STRTAB_SHARD_COUNT, capacity);
294 next = s->bucket_next;
295 s->bucket_next = strtab[idx];
296 strtab[idx] = s;
297 }
298 }
299
300 gpr_free(shard->strs);
301 shard->strs = strtab;
302 shard->capacity = capacity;
303
304 GPR_TIMER_END("grow_strtab", 0);
305 }
306
307 static void internal_destroy_string(strtab_shard *shard, internal_string *is) {
308 internal_string **prev_next;
309 internal_string *cur;
310 GPR_TIMER_BEGIN("internal_destroy_string", 0);
311 if (is->has_base64_and_huffman_encoded) {
312 gpr_slice_unref(is->base64_and_huffman);
313 }
314 for (prev_next = &shard->strs[TABLE_IDX(is->hash, LOG2_STRTAB_SHARD_COUNT,
315 shard->capacity)],
316 cur = *prev_next;
317 cur != is; prev_next = &cur->bucket_next, cur = cur->bucket_next)
318 ;
319 *prev_next = cur->bucket_next;
320 shard->count--;
321 gpr_free(is);
322 GPR_TIMER_END("internal_destroy_string", 0);
323 }
324
325 static void slice_ref(void *p) {
326 internal_string *is =
327 (internal_string *)((char *)p - offsetof(internal_string, refcount));
328 GRPC_MDSTR_REF((grpc_mdstr *)(is));
329 }
330
331 static void slice_unref(void *p) {
332 internal_string *is =
333 (internal_string *)((char *)p - offsetof(internal_string, refcount));
334 GRPC_MDSTR_UNREF((grpc_mdstr *)(is));
335 }
336
337 grpc_mdstr *grpc_mdstr_from_string(const char *str) {
338 return grpc_mdstr_from_buffer((const uint8_t *)str, strlen(str));
339 }
340
341 grpc_mdstr *grpc_mdstr_from_slice(gpr_slice slice) {
342 grpc_mdstr *result = grpc_mdstr_from_buffer(GPR_SLICE_START_PTR(slice),
343 GPR_SLICE_LENGTH(slice));
344 gpr_slice_unref(slice);
345 return result;
346 }
347
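/* Strings are interned: two calls with byte-identical contents return the
   same grpc_mdstr pointer, which is what allows keys and values to be
   compared by pointer (rather than by memcmp) in
   grpc_mdelem_from_metadata_strings below. */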
348 grpc_mdstr *grpc_mdstr_from_buffer(const uint8_t *buf, size_t length) {
349 uint32_t hash = gpr_murmur_hash3(buf, length, g_hash_seed);
350 internal_string *s;
351 strtab_shard *shard =
352 &g_strtab_shard[SHARD_IDX(hash, LOG2_STRTAB_SHARD_COUNT)];
353 size_t i;
354 size_t idx;
355
356 GPR_TIMER_BEGIN("grpc_mdstr_from_buffer", 0);
357
358 /* search for a static string */
359 for (i = 0; i <= g_static_strtab_maxprobe; i++) {
360 grpc_mdstr *ss;
361 idx = (hash + i) % GPR_ARRAY_SIZE(g_static_strtab);
362 ss = g_static_strtab[idx];
363 if (ss == NULL) break;
364 if (ss->hash == hash && GPR_SLICE_LENGTH(ss->slice) == length &&
365 0 == memcmp(buf, GPR_SLICE_START_PTR(ss->slice), length)) {
366 GPR_TIMER_END("grpc_mdstr_from_buffer", 0);
367 return ss;
368 }
369 }
370
371 gpr_mu_lock(&shard->mu);
372
373 /* search for an existing string */
374 idx = TABLE_IDX(hash, LOG2_STRTAB_SHARD_COUNT, shard->capacity);
375 for (s = shard->strs[idx]; s; s = s->bucket_next) {
376 if (s->hash == hash && GPR_SLICE_LENGTH(s->slice) == length &&
377 0 == memcmp(buf, GPR_SLICE_START_PTR(s->slice), length)) {
378 GRPC_MDSTR_REF((grpc_mdstr *)s);
379 gpr_mu_unlock(&shard->mu);
380 GPR_TIMER_END("grpc_mdstr_from_buffer", 0);
381 return (grpc_mdstr *)s;
382 }
383 }
384
385 /* not found: create a new string */
386 if (length + 1 < GPR_SLICE_INLINED_SIZE) {
387 /* string data goes directly into the slice */
388 s = gpr_malloc(sizeof(internal_string));
389 gpr_atm_rel_store(&s->refcnt, 2);
390 s->slice.refcount = NULL;
391 memcpy(s->slice.data.inlined.bytes, buf, length);
392 s->slice.data.inlined.bytes[length] = 0;
393 s->slice.data.inlined.length = (uint8_t)length;
394 } else {
395 /* string data goes after the internal_string header; allocate one extra
396 byte for the null terminator */
397 s = gpr_malloc(sizeof(internal_string) + length + 1);
398 gpr_atm_rel_store(&s->refcnt, 2);
399 s->refcount.ref = slice_ref;
400 s->refcount.unref = slice_unref;
401 s->slice.refcount = &s->refcount;
402 s->slice.data.refcounted.bytes = (uint8_t *)(s + 1);
403 s->slice.data.refcounted.length = length;
404 memcpy(s->slice.data.refcounted.bytes, buf, length);
405 /* add a null terminator for cheap c string conversion when desired */
406 s->slice.data.refcounted.bytes[length] = 0;
407 }
408 s->has_base64_and_huffman_encoded = 0;
409 s->hash = hash;
410 s->bucket_next = shard->strs[idx];
411 shard->strs[idx] = s;
412
413 shard->count++;
414
415 if (shard->count > shard->capacity * 2) {
416 grow_strtab(shard);
417 }
418
419 gpr_mu_unlock(&shard->mu);
420 GPR_TIMER_END("grpc_mdstr_from_buffer", 0);
421
422 return (grpc_mdstr *)s;
423 }
424
425 static void gc_mdtab(mdtab_shard *shard) {
426 size_t i;
427 internal_metadata **prev_next;
428 internal_metadata *md, *next;
429
430 GPR_TIMER_BEGIN("gc_mdtab", 0);
431 for (i = 0; i < shard->capacity; i++) {
432 prev_next = &shard->elems[i];
433 for (md = shard->elems[i]; md; md = next) {
434 void *user_data = (void *)gpr_atm_no_barrier_load(&md->user_data);
435 next = md->bucket_next;
436 if (gpr_atm_acq_load(&md->refcnt) == 0) {
437 GRPC_MDSTR_UNREF((grpc_mdstr *)md->key);
438 GRPC_MDSTR_UNREF((grpc_mdstr *)md->value);
439 if (md->user_data) {
440 ((destroy_user_data_func)gpr_atm_no_barrier_load(
441 &md->destroy_user_data))(user_data);
442 }
443 gpr_free(md);
444 *prev_next = next;
445 shard->free--;
446 shard->count--;
447 } else {
448 prev_next = &md->bucket_next;
449 }
450 }
451 }
452 GPR_TIMER_END("gc_mdtab", 0);
453 }
454
455 static void grow_mdtab(mdtab_shard *shard) {
456 size_t capacity = shard->capacity * 2;
457 size_t i;
458 internal_metadata **mdtab;
459 internal_metadata *md, *next;
460 uint32_t hash;
461
462 GPR_TIMER_BEGIN("grow_mdtab", 0);
463
464 mdtab = gpr_malloc(sizeof(internal_metadata *) * capacity);
465 memset(mdtab, 0, sizeof(internal_metadata *) * capacity);
466
467 for (i = 0; i < shard->capacity; i++) {
468 for (md = shard->elems[i]; md; md = next) {
469 size_t idx;
470 hash = GRPC_MDSTR_KV_HASH(md->key->hash, md->value->hash);
471 next = md->bucket_next;
472 idx = TABLE_IDX(hash, LOG2_MDTAB_SHARD_COUNT, capacity);
473 md->bucket_next = mdtab[idx];
474 mdtab[idx] = md;
475 }
476 }
477
478 gpr_free(shard->elems);
479 shard->elems = mdtab;
480 shard->capacity = capacity;
481
482 GPR_TIMER_END("grow_mdtab", 0);
483 }
484
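/* When an mdtab shard overflows (count > 2 * capacity), reclaiming is
   preferred over growing: if more than a quarter of the capacity is known to
   be free, gc_mdtab() suffices; otherwise grow_mdtab() rehashes into a table
   of twice the size. */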
485 static void rehash_mdtab(mdtab_shard *shard) {
486 if (shard->free > shard->capacity / 4) {
487 gc_mdtab(shard);
488 } else {
489 grow_mdtab(shard);
490 }
491 }
492
493 grpc_mdelem *grpc_mdelem_from_metadata_strings(grpc_mdstr *mkey,
494 grpc_mdstr *mvalue) {
495 internal_string *key = (internal_string *)mkey;
496 internal_string *value = (internal_string *)mvalue;
497 uint32_t hash = GRPC_MDSTR_KV_HASH(mkey->hash, mvalue->hash);
498 internal_metadata *md;
499 mdtab_shard *shard = &g_mdtab_shard[SHARD_IDX(hash, LOG2_MDTAB_SHARD_COUNT)];
500 size_t i;
501 size_t idx;
502
503 GPR_TIMER_BEGIN("grpc_mdelem_from_metadata_strings", 0);
504
505 if (is_mdstr_static(mkey) && is_mdstr_static(mvalue)) {
506 for (i = 0; i <= g_static_mdtab_maxprobe; i++) {
507 grpc_mdelem *smd;
508 idx = (hash + i) % GPR_ARRAY_SIZE(g_static_mdtab);
509 smd = g_static_mdtab[idx];
510 if (smd == NULL) break;
511 if (smd->key == mkey && smd->value == mvalue) {
512 GPR_TIMER_END("grpc_mdelem_from_metadata_strings", 0);
513 return smd;
514 }
515 }
516 }
517
518 gpr_mu_lock(&shard->mu);
519
520 idx = TABLE_IDX(hash, LOG2_MDTAB_SHARD_COUNT, shard->capacity);
521 /* search for an existing pair */
522 for (md = shard->elems[idx]; md; md = md->bucket_next) {
523 if (md->key == key && md->value == value) {
524 REF_MD_LOCKED(shard, md);
525 GRPC_MDSTR_UNREF((grpc_mdstr *)key);
526 GRPC_MDSTR_UNREF((grpc_mdstr *)value);
527 gpr_mu_unlock(&shard->mu);
528 GPR_TIMER_END("grpc_mdelem_from_metadata_strings", 0);
529 return (grpc_mdelem *)md;
530 }
531 }
532
533 /* not found: create a new pair */
534 md = gpr_malloc(sizeof(internal_metadata));
535 gpr_atm_rel_store(&md->refcnt, 2);
536 md->key = key;
537 md->value = value;
538 md->user_data = 0;
539 md->destroy_user_data = 0;
540 md->bucket_next = shard->elems[idx];
541 shard->elems[idx] = md;
542 gpr_mu_init(&md->mu_user_data);
543 #ifdef GRPC_METADATA_REFCOUNT_DEBUG
544 gpr_log(GPR_DEBUG, "ELM NEW:%p:%d: '%s' = '%s'", md,
545 gpr_atm_no_barrier_load(&md->refcnt),
546 grpc_mdstr_as_c_string((grpc_mdstr *)md->key),
547 grpc_mdstr_as_c_string((grpc_mdstr *)md->value));
548 #endif
549 shard->count++;
550
551 if (shard->count > shard->capacity * 2) {
552 rehash_mdtab(shard);
553 }
554
555 gpr_mu_unlock(&shard->mu);
556
557 GPR_TIMER_END("grpc_mdelem_from_metadata_strings", 0);
558
559 return (grpc_mdelem *)md;
560 }
561
562 grpc_mdelem *grpc_mdelem_from_strings(const char *key, const char *value) {
563 return grpc_mdelem_from_metadata_strings(grpc_mdstr_from_string(key),
564 grpc_mdstr_from_string(value));
565 }
566
567 grpc_mdelem *grpc_mdelem_from_slices(gpr_slice key, gpr_slice value) {
568 return grpc_mdelem_from_metadata_strings(grpc_mdstr_from_slice(key),
569 grpc_mdstr_from_slice(value));
570 }
571
572 grpc_mdelem *grpc_mdelem_from_string_and_buffer(const char *key,
573 const uint8_t *value,
574 size_t value_length) {
575 return grpc_mdelem_from_metadata_strings(
576 grpc_mdstr_from_string(key), grpc_mdstr_from_buffer(value, value_length));
577 }
578
579 grpc_mdelem *grpc_mdelem_ref(grpc_mdelem *gmd DEBUG_ARGS) {
580 internal_metadata *md = (internal_metadata *)gmd;
581 if (is_mdelem_static(gmd)) return gmd;
582 #ifdef GRPC_METADATA_REFCOUNT_DEBUG
583 gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
584 "ELM REF:%p:%d->%d: '%s' = '%s'", md,
585 gpr_atm_no_barrier_load(&md->refcnt),
586 gpr_atm_no_barrier_load(&md->refcnt) + 1,
587 grpc_mdstr_as_c_string((grpc_mdstr *)md->key),
588 grpc_mdstr_as_c_string((grpc_mdstr *)md->value));
589 #endif
590 /* we can assume the ref count is >= 1 as the application is calling
591 this function - meaning that no adjustment to mdtab_free is necessary,
592 simplifying the logic here to be just an atomic increment */
593 /* use C assert to have this removed in opt builds */
594 assert(gpr_atm_no_barrier_load(&md->refcnt) >= 2);
595 gpr_atm_no_barrier_fetch_add(&md->refcnt, 1);
596 return gmd;
597 }
598
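/* Dropping the last external reference does not free a dynamic element
   immediately: its refcount is parked at 0 under the shard lock and
   shard->free is bumped, leaving reclamation to a later gc_mdtab() pass (or
   to grpc_mdctx_global_shutdown()). */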
599 void grpc_mdelem_unref(grpc_mdelem *gmd DEBUG_ARGS) {
600 internal_metadata *md = (internal_metadata *)gmd;
601 if (!md) return;
602 if (is_mdelem_static(gmd)) return;
603 #ifdef GRPC_METADATA_REFCOUNT_DEBUG
604 gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
605 "ELM UNREF:%p:%d->%d: '%s' = '%s'", md,
606 gpr_atm_no_barrier_load(&md->refcnt),
607 gpr_atm_no_barrier_load(&md->refcnt) - 1,
608 grpc_mdstr_as_c_string((grpc_mdstr *)md->key),
609 grpc_mdstr_as_c_string((grpc_mdstr *)md->value));
610 #endif
611 if (2 == gpr_atm_full_fetch_add(&md->refcnt, -1)) {
612 uint32_t hash = GRPC_MDSTR_KV_HASH(md->key->hash, md->value->hash);
613 mdtab_shard *shard =
614 &g_mdtab_shard[SHARD_IDX(hash, LOG2_MDTAB_SHARD_COUNT)];
615 GPR_TIMER_BEGIN("grpc_mdelem_unref.to_zero", 0);
616 gpr_mu_lock(&shard->mu);
617 if (1 == gpr_atm_no_barrier_load(&md->refcnt)) {
618 shard->free++;
619 gpr_atm_no_barrier_store(&md->refcnt, 0);
620 }
621 gpr_mu_unlock(&shard->mu);
622 GPR_TIMER_END("grpc_mdelem_unref.to_zero", 0);
623 }
624 }
625
626 const char *grpc_mdstr_as_c_string(grpc_mdstr *s) {
627 return (const char *)GPR_SLICE_START_PTR(s->slice);
628 }
629
630 grpc_mdstr *grpc_mdstr_ref(grpc_mdstr *gs DEBUG_ARGS) {
631 internal_string *s = (internal_string *)gs;
632 if (is_mdstr_static(gs)) return gs;
633 GPR_ASSERT(gpr_atm_full_fetch_add(&s->refcnt, 1) != 0);
634 return gs;
635 }
636
637 void grpc_mdstr_unref(grpc_mdstr *gs DEBUG_ARGS) {
638 internal_string *s = (internal_string *)gs;
639 if (is_mdstr_static(gs)) return;
640 if (2 == gpr_atm_full_fetch_add(&s->refcnt, -1)) {
641 strtab_shard *shard =
642 &g_strtab_shard[SHARD_IDX(s->hash, LOG2_STRTAB_SHARD_COUNT)];
643 gpr_mu_lock(&shard->mu);
644 if (1 == gpr_atm_no_barrier_load(&s->refcnt)) {
645 internal_destroy_string(shard, s);
646 }
647 gpr_mu_unlock(&shard->mu);
648 }
649 }
650
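/* User data attached to an element is write-once, and the destroy function
   doubles as a type tag: grpc_mdelem_get_user_data() only returns the stored
   pointer when queried with the same destroy_func that was used to set it. */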
651 void *grpc_mdelem_get_user_data(grpc_mdelem *md, void (*destroy_func)(void *)) {
652 internal_metadata *im = (internal_metadata *)md;
654 if (is_mdelem_static(md)) {
655 return (void *)grpc_static_mdelem_user_data[md - grpc_static_mdelem_table];
656 }
657 if (gpr_atm_acq_load(&im->destroy_user_data) == (gpr_atm)destroy_func) {
658 return (void *)gpr_atm_no_barrier_load(&im->user_data);
659 } else {
660 return NULL;
661 }
663 }
664
665 void grpc_mdelem_set_user_data(grpc_mdelem *md, void (*destroy_func)(void *),
666 void *user_data) {
667 internal_metadata *im = (internal_metadata *)md;
668 GPR_ASSERT(!is_mdelem_static(md));
669 GPR_ASSERT((user_data == NULL) == (destroy_func == NULL));
670 gpr_mu_lock(&im->mu_user_data);
671 if (gpr_atm_no_barrier_load(&im->destroy_user_data)) {
672 /* user data can only be set once */
673 gpr_mu_unlock(&im->mu_user_data);
674 if (destroy_func != NULL) {
675 destroy_func(user_data);
676 }
677 return;
678 }
679 gpr_atm_no_barrier_store(&im->user_data, (gpr_atm)user_data);
680 gpr_atm_rel_store(&im->destroy_user_data, (gpr_atm)destroy_func);
681 gpr_mu_unlock(&im->mu_user_data);
682 }
683
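/* The base64+Huffman form of a string is computed lazily, at most once per
   interned string, and cached on the internal_string under the owning strtab
   shard's mutex; subsequent callers receive the cached slice. */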
684 gpr_slice grpc_mdstr_as_base64_encoded_and_huffman_compressed(grpc_mdstr *gs) {
685 internal_string *s = (internal_string *)gs;
686 gpr_slice slice;
687 strtab_shard *shard =
688 &g_strtab_shard[SHARD_IDX(s->hash, LOG2_STRTAB_SHARD_COUNT)];
689 gpr_mu_lock(&shard->mu);
690 if (!s->has_base64_and_huffman_encoded) {
691 s->base64_and_huffman =
692 grpc_chttp2_base64_encode_and_huffman_compress(s->slice);
693 s->has_base64_and_huffman_encoded = 1;
694 }
695 slice = s->base64_and_huffman;
696 gpr_mu_unlock(&shard->mu);
697 return slice;
698 }