/*
 *
 * Copyright 2015, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/* Available log space is divided up in blocks of CENSUS_LOG_MAX_RECORD_SIZE
   (i.e. 2^CENSUS_LOG_2_MAX_RECORD_SIZE) bytes. A block can be in one of the
   following three data structures:
   - Free blocks (free_block_list)
   - Blocks with unread data (dirty_block_list)
   - Blocks currently attached to cores (core_local_blocks[])

   census_log_start_write() moves a block from core_local_blocks[] to the
   end of dirty_block_list when the block:
   - is out of space, OR
   - has an incomplete record (an incomplete record occurs when a thread
     calls census_log_start_write() and is context-switched before calling
     census_log_end_write()).
   So blocks in dirty_block_list are ordered, from oldest to newest, by the
   time at which they were detached from a core.

   census_log_read_next() first iterates over dirty_block_list and then
   core_local_blocks[]. It moves completely read blocks from
   dirty_block_list to free_block_list. Blocks in core_local_blocks[] are
   not freed, even when completely read.

   If the log is configured to discard old records and free_block_list is
   empty, census_log_start_write() iterates over dirty_block_list to
   allocate a new block. It moves the oldest available block (one with no
   pending read/write) to core_local_blocks[].

   core_local_block_struct is used to implement a map from core id to the
   block associated with that core. This mapping is advisory. It is possible
   that the block returned by this mapping is no longer associated with that
   core. The mapping is updated, lazily, by census_log_start_write().

   Locking in the block struct:

   An exclusive hold on g_log.lock is required before calling any function
   that operates on block structs, except census_log_start_write() and
   census_log_end_write().

   Writes to a block are serialized via writer_lock.
   census_log_start_write() acquires this lock and census_log_end_write()
   releases it. On failure to acquire the lock, the writer allocates a new
   block for the current core and updates core_local_block accordingly.

   Simultaneous read and write access is allowed. A reader can safely read
   up to the committed bytes (bytes_committed).

   reader_lock protects the block currently being read from getting
   recycled. start_read() acquires reader_lock and end_read() releases it.

   Read/write access to a block is disabled via try_disable_access(). It
   returns with both writer_lock and reader_lock held. These locks are
   subsequently released by enable_access() to re-enable access to the
   block.

   A note on naming: most function/struct names are prefixed with cl_
   (shorthand for census_log). Further, functions that manipulate structures
   include the name of the structure, which is passed as the first
   argument. E.g. cl_block_initialize() will initialize a cl_block.
*/
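
/* Illustrative usage sketch of the write-side API described above. This is
   not part of this file, and 'event' and its type are hypothetical. A
   writer reserves space, fills it, and then commits:

     struct my_event event;
     void *record = census_log_start_write(sizeof(event));
     if (record != NULL) {
       memcpy(record, &event, sizeof(event));
       census_log_end_write(record, sizeof(event));
     }

   Readers drain the log via census_log_init_reader() and repeated
   census_log_read_next() calls; see the sketch following
   census_log_read_next() below. */
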
#include "src/core/statistics/census_log.h"
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/cpu.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>

/* End of platform specific code */

typedef struct census_log_block_list_struct {
  struct census_log_block_list_struct *next;
  struct census_log_block_list_struct *prev;
  struct census_log_block *block;
} cl_block_list_struct;

typedef struct census_log_block {
  /* Pointer to underlying buffer */
  char *buffer;
  gpr_atm writer_lock;
  gpr_atm reader_lock;
  /* Keeps completely written bytes. Declared atomic because accessed
     simultaneously by reader and writer. */
  gpr_atm bytes_committed;
  /* Bytes already read */
  int32_t bytes_read;
  /* Links for list */
  cl_block_list_struct link;
/* We want this structure to be cacheline aligned. We assume the following
   sizes for the various parts on 32/64bit systems:
   type                 32b size    64b size
   char*                   4           8
   3x gpr_atm             12          24
   int32_t                 4           8 (assumes padding)
   cl_block_list_struct   12          24
   TOTAL                  32          64

   Depending on the size of our cacheline and the architecture, we
   selectively add char buffering to this structure. The size is checked
   via assert in census_log_initialize(). */
#if defined(GPR_ARCH_64)
#define CL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 64)
#else
#if defined(GPR_ARCH_32)
#define CL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 32)
#else
#error "Unknown architecture"
#endif
#endif
#if CL_BLOCK_PAD_SIZE > 0
  char padding[CL_BLOCK_PAD_SIZE];
#endif
} cl_block;
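
/* Worked example of the padding arithmetic above, assuming a 64-byte
   cacheline (GPR_CACHELINE_SIZE == 64): on a 64-bit system the listed
   fields total 64 bytes, so CL_BLOCK_PAD_SIZE is 64 - 64 = 0 and no
   padding array is emitted. On a 32-bit system they total 32 bytes, so
   CL_BLOCK_PAD_SIZE is 64 - 32 = 32 and a 32-byte pad rounds
   sizeof(cl_block) up to exactly one cacheline. With a 128-byte cacheline
   the pads would be 64 and 96 bytes, respectively. */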

/* A list of cl_blocks, doubly-linked through cl_block::link. */
typedef struct census_log_block_list {
  int32_t count;           /* Number of items in list. */
  cl_block_list_struct ht; /* head/tail of linked list. */
} cl_block_list;

/* Cacheline aligned block pointers to avoid false sharing. Block pointer
   must be initialized via set_block() before calling other functions. */
typedef struct census_log_core_local_block {
  gpr_atm block;
/* Ensure cacheline alignment: we assume sizeof(gpr_atm) == 4 or 8 */
#if defined(GPR_ARCH_64)
#define CL_CORE_LOCAL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 8)
#else
#if defined(GPR_ARCH_32)
#define CL_CORE_LOCAL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 4)
#else
#error "Unknown architecture"
#endif
#endif
#if CL_CORE_LOCAL_BLOCK_PAD_SIZE > 0
  char padding[CL_CORE_LOCAL_BLOCK_PAD_SIZE];
#endif
} cl_core_local_block;

struct census_log {
  int discard_old_records;
  /* Number of cores (aka hardware-contexts) */
  unsigned num_cores;
  /* Number of CENSUS_LOG_MAX_RECORD_SIZE-sized blocks in the log */
  int32_t num_blocks;
  cl_block *blocks;                       /* Block metadata. */
  cl_core_local_block *core_local_blocks; /* Keeps core to block mappings. */
  gpr_mu lock;
  int initialized; /* has log been initialized? */
  /* Keeps the state of the reader iterator. A value of 0 indicates that the
     iterator has reached the end. census_log_init_reader() resets the value
     to num_cores to restart iteration. */
  uint32_t read_iterator_state;
  /* Points to the block being read. If non-NULL, the block is locked for
     reading (block_being_read->reader_lock is held). */
  cl_block *block_being_read;
  /* A non-zero value indicates that the log is full. */
  gpr_atm is_full;
  char *buffer;
  cl_block_list free_block_list;
  cl_block_list dirty_block_list;
  gpr_atm out_of_space_count;
};

/* Single internal log */
static struct census_log g_log;

/* Functions that operate on an atomic memory location used as a lock */

/* Returns non-zero if lock is acquired */
static int cl_try_lock(gpr_atm *lock) { return gpr_atm_acq_cas(lock, 0, 1); }

static void cl_unlock(gpr_atm *lock) { gpr_atm_rel_store(lock, 0); }
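
/* Sketch of the try-lock protocol these helpers implement (illustrative
   only; 'block' is a hypothetical cl_block pointer). cl_try_lock() makes a
   single acquire-CAS attempt from 0 to 1 and never blocks, so callers must
   be prepared for failure:

     if (cl_try_lock(&block->writer_lock)) {
       ... write to the block ...
       cl_unlock(&block->writer_lock);
     } else {
       ... fall back, e.g. attach a different block to this core ...
     }
*/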

/* Functions that operate on cl_core_local_block's */

static void cl_core_local_block_set_block(cl_core_local_block *clb,
                                          cl_block *block) {
  gpr_atm_rel_store(&clb->block, (gpr_atm)block);
}

static cl_block *cl_core_local_block_get_block(cl_core_local_block *clb) {
  return (cl_block *)gpr_atm_acq_load(&clb->block);
}

/* Functions that operate on cl_block_list_struct's */

static void cl_block_list_struct_initialize(cl_block_list_struct *bls,
                                            cl_block *block) {
  bls->next = bls->prev = bls;
  bls->block = block;
}

/* Functions that operate on cl_block_list's */

static void cl_block_list_initialize(cl_block_list *list) {
  list->count = 0;
  cl_block_list_struct_initialize(&list->ht, NULL);
}

/* Returns the head of the list, or NULL if empty. */
static cl_block *cl_block_list_head(cl_block_list *list) {
  return list->ht.next->block;
}

/* Insert element *e after *pos. */
static void cl_block_list_insert(cl_block_list *list, cl_block_list_struct *pos,
                                 cl_block_list_struct *e) {
  list->count++;
  e->next = pos->next;
  e->prev = pos;
  e->next->prev = e;
  e->prev->next = e;
}

/* Insert block at the head of the list. */
static void cl_block_list_insert_at_head(cl_block_list *list, cl_block *block) {
  cl_block_list_insert(list, &list->ht, &block->link);
}

/* Insert block at the tail of the list. */
static void cl_block_list_insert_at_tail(cl_block_list *list, cl_block *block) {
  cl_block_list_insert(list, list->ht.prev, &block->link);
}

/* Removes block *b. Requires that *b be in the list. */
static void cl_block_list_remove(cl_block_list *list, cl_block *b) {
  list->count--;
  b->link.next->prev = b->link.prev;
  b->link.prev->next = b->link.next;
}
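
/* Illustrative sketch (hypothetical 'block' variable): moving a block
   between lists, as done throughout this file, is a remove followed by an
   insert:

     cl_block_list_remove(&g_log.dirty_block_list, block);
     cl_block_list_insert_at_head(&g_log.free_block_list, block);

   The sentinel ht is its own next/prev in an empty list and its 'block'
   pointer is NULL, which is why cl_block_list_head() returns NULL for an
   empty list. */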

/* Functions that operate on cl_block's */

static void cl_block_initialize(cl_block *block, char *buffer) {
  block->buffer = buffer;
  gpr_atm_rel_store(&block->writer_lock, 0);
  gpr_atm_rel_store(&block->reader_lock, 0);
  gpr_atm_rel_store(&block->bytes_committed, 0);
  block->bytes_read = 0;
  cl_block_list_struct_initialize(&block->link, block);
}

/* Guards against exposing a partially written buffer to the reader. */
static void cl_block_set_bytes_committed(cl_block *block,
                                         int32_t bytes_committed) {
  gpr_atm_rel_store(&block->bytes_committed, bytes_committed);
}

static int32_t cl_block_get_bytes_committed(cl_block *block) {
  return gpr_atm_acq_load(&block->bytes_committed);
}

/* Tries to disable future read/write access to this block. Succeeds if:
   - there is no in-progress write, AND
   - there is no in-progress read, AND
   - 'discard_data' is set, OR the block has no unread data.
   On success, clears the block state and returns with writer_lock and
   reader_lock held. These locks are released by a subsequent
   cl_block_enable_access() call. */
static int cl_block_try_disable_access(cl_block *block, int discard_data) {
  if (!cl_try_lock(&block->writer_lock)) {
    return 0;
  }
  if (!cl_try_lock(&block->reader_lock)) {
    cl_unlock(&block->writer_lock);
    return 0;
  }
  if (!discard_data &&
      (block->bytes_read != cl_block_get_bytes_committed(block))) {
    cl_unlock(&block->reader_lock);
    cl_unlock(&block->writer_lock);
    return 0;
  }
  cl_block_set_bytes_committed(block, 0);
  block->bytes_read = 0;
  return 1;
}
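
/* Illustrative pairing of the disable/enable protocol (hypothetical
   caller; g_log.lock must be held and 'block' is some dirty block). This
   is essentially what cl_allocate_block() and
   cl_allocate_core_local_block() do when recycling a block:

     if (cl_block_try_disable_access(block, 1)) {
       cl_block_list_remove(&g_log.dirty_block_list, block);
       ... attach the block to a core ...
       cl_block_enable_access(block);
     }
*/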

static void cl_block_enable_access(cl_block *block) {
  cl_unlock(&block->reader_lock);
  cl_unlock(&block->writer_lock);
}

/* On success, returns a pointer to the writable region and holds
   writer_lock until the corresponding cl_block_end_write() call; returns
   NULL (without the lock) if the lock is contended or the block lacks
   space. */
static void *cl_block_start_write(cl_block *block, size_t size) {
  int32_t bytes_committed;
  if (!cl_try_lock(&block->writer_lock)) {
    return NULL;
  }
  bytes_committed = cl_block_get_bytes_committed(block);
  if (bytes_committed + size > CENSUS_LOG_MAX_RECORD_SIZE) {
    cl_unlock(&block->writer_lock);
    return NULL;
  }
  return block->buffer + bytes_committed;
}

/* Releases writer_lock and increments committed bytes by 'bytes_written'.
   'bytes_written' must be <= the 'size' specified in the corresponding
   cl_block_start_write() call. This function is thread-safe. */
static void cl_block_end_write(cl_block *block, size_t bytes_written) {
  cl_block_set_bytes_committed(
      block, cl_block_get_bytes_committed(block) + bytes_written);
  cl_unlock(&block->writer_lock);
}

/* Returns a pointer to the first unread byte in the buffer. The number of
   bytes available is returned in 'bytes_available'. Acquires the reader
   lock, which is released by a subsequent cl_block_end_read() call.
   Returns NULL if:
   - a read is already in progress, OR
   - no data is available. */
static void *cl_block_start_read(cl_block *block, size_t *bytes_available) {
  void *record;
  if (!cl_try_lock(&block->reader_lock)) {
    return NULL;
  }
  /* bytes_committed may change from under us. Use bytes_available to update
     bytes_read below. */
  *bytes_available = cl_block_get_bytes_committed(block) - block->bytes_read;
  if (*bytes_available == 0) {
    cl_unlock(&block->reader_lock);
    return NULL;
  }
  record = block->buffer + block->bytes_read;
  block->bytes_read += *bytes_available;
  return record;
}

static void cl_block_end_read(cl_block *block) {
  cl_unlock(&block->reader_lock);
}
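
/* Illustrative reader-side pairing (hypothetical caller; in this file the
   only reader is census_log_read_next(), which runs under g_log.lock):

     size_t n;
     void *data = cl_block_start_read(block, &n);
     if (data != NULL) {
       ... consume n bytes starting at data ...
       cl_block_end_read(block);
     }
*/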

/* Internal functions operating on g_log */

/* Allocates a new free block (or recycles an available dirty block if the
   log is configured to discard old records). Returns NULL if out of
   space. */
static cl_block *cl_allocate_block(void) {
  cl_block *block = cl_block_list_head(&g_log.free_block_list);
  if (block != NULL) {
    cl_block_list_remove(&g_log.free_block_list, block);
    return block;
  }
  if (!g_log.discard_old_records) {
    /* No free block and log is configured to keep old records. */
    return NULL;
  }
  /* Recycle a dirty block. Start from the oldest. */
  for (block = cl_block_list_head(&g_log.dirty_block_list); block != NULL;
       block = block->link.next->block) {
    if (cl_block_try_disable_access(block, 1 /* discard data */)) {
      cl_block_list_remove(&g_log.dirty_block_list, block);
      return block;
    }
  }
  return NULL;
}

/* Allocates a new block and updates the core id => block mapping.
   'old_block' points to the block that the caller thinks is attached to
   'core_id'. 'old_block' may be NULL. Returns non-zero if:
   - a new block was allocated, OR
   - the 'core_id' => 'old_block' mapping changed (another thread allocated
     a block before the lock was acquired). */
static int cl_allocate_core_local_block(int32_t core_id, cl_block *old_block) {
  /* Now that we have the lock, check if the core-local mapping has
     changed. */
  cl_core_local_block *core_local_block = &g_log.core_local_blocks[core_id];
  cl_block *block = cl_core_local_block_get_block(core_local_block);
  if ((block != NULL) && (block != old_block)) {
    return 1;
  }
  if (block != NULL) {
    cl_core_local_block_set_block(core_local_block, NULL);
    cl_block_list_insert_at_tail(&g_log.dirty_block_list, block);
  }
  block = cl_allocate_block();
  if (block == NULL) {
    gpr_atm_rel_store(&g_log.is_full, 1);
    return 0;
  }
  cl_core_local_block_set_block(core_local_block, block);
  cl_block_enable_access(block);
  return 1;
}

static cl_block *cl_get_block(void *record) {
  uintptr_t p = (uintptr_t)((char *)record - g_log.buffer);
  uintptr_t index = p >> CENSUS_LOG_2_MAX_RECORD_SIZE;
  return &g_log.blocks[index];
}
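
/* Worked example of the mapping above, assuming 16KB blocks
   (CENSUS_LOG_2_MAX_RECORD_SIZE == 14): a record at g_log.buffer + 40000
   gives p == 40000 and index == 40000 >> 14 == 2, i.e. g_log.blocks[2],
   whose buffer spans offsets [32768, 49152) of g_log.buffer. This works
   because census_log_initialize() places block ix's buffer at exactly
   g_log.buffer + ix * CENSUS_LOG_MAX_RECORD_SIZE. */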

/* Gets the next block to read and tries to free the 'prev' block (if not
   NULL). Returns NULL when the end is reached. */
static cl_block *cl_next_block_to_read(cl_block *prev) {
  cl_block *block = NULL;
  if (g_log.read_iterator_state == g_log.num_cores) {
    /* We are traversing the dirty list; find the next dirty block. */
    if (prev != NULL) {
      /* Try to free the previous block if there is no unread data. This
         block may have unread data if a previously incomplete record was
         completed between read_next() calls. */
      block = prev->link.next->block;
      if (cl_block_try_disable_access(prev, 0 /* do not discard data */)) {
        cl_block_list_remove(&g_log.dirty_block_list, prev);
        cl_block_list_insert_at_head(&g_log.free_block_list, prev);
        gpr_atm_rel_store(&g_log.is_full, 0);
      }
    } else {
      block = cl_block_list_head(&g_log.dirty_block_list);
    }
    if (block != NULL) {
      return block;
    }
    /* We are done with the dirty list; moving on to core-local blocks. */
  }
  while (g_log.read_iterator_state > 0) {
    g_log.read_iterator_state--;
    block = cl_core_local_block_get_block(
        &g_log.core_local_blocks[g_log.read_iterator_state]);
    if (block != NULL) {
      return block;
    }
  }
  return NULL;
}

/* External functions: primary stats_log interface */
void census_log_initialize(size_t size_in_mb, int discard_old_records) {
  int32_t ix;
  /* Check cacheline alignment. */
  GPR_ASSERT(sizeof(cl_block) % GPR_CACHELINE_SIZE == 0);
  GPR_ASSERT(sizeof(cl_core_local_block) % GPR_CACHELINE_SIZE == 0);
  GPR_ASSERT(!g_log.initialized);
  g_log.discard_old_records = discard_old_records;
  g_log.num_cores = gpr_cpu_num_cores();
  /* Ensure at least as many blocks as there are cores. */
  g_log.num_blocks = GPR_MAX(
      g_log.num_cores, (size_in_mb << 20) >> CENSUS_LOG_2_MAX_RECORD_SIZE);
  gpr_mu_init(&g_log.lock);
  g_log.read_iterator_state = 0;
  g_log.block_being_read = NULL;
  gpr_atm_rel_store(&g_log.is_full, 0);
  g_log.core_local_blocks = (cl_core_local_block *)gpr_malloc_aligned(
      g_log.num_cores * sizeof(cl_core_local_block), GPR_CACHELINE_SIZE_LOG);
  memset(g_log.core_local_blocks, 0,
         g_log.num_cores * sizeof(cl_core_local_block));
  g_log.blocks = (cl_block *)gpr_malloc_aligned(
      g_log.num_blocks * sizeof(cl_block), GPR_CACHELINE_SIZE_LOG);
  memset(g_log.blocks, 0, g_log.num_blocks * sizeof(cl_block));
  g_log.buffer = gpr_malloc(g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
  memset(g_log.buffer, 0, g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
  cl_block_list_initialize(&g_log.free_block_list);
  cl_block_list_initialize(&g_log.dirty_block_list);
  for (ix = 0; ix < g_log.num_blocks; ++ix) {
    cl_block *block = g_log.blocks + ix;
    cl_block_initialize(block,
                        g_log.buffer + (CENSUS_LOG_MAX_RECORD_SIZE * ix));
    cl_block_try_disable_access(block, 1 /* discard data */);
    cl_block_list_insert_at_tail(&g_log.free_block_list, block);
  }
  gpr_atm_rel_store(&g_log.out_of_space_count, 0);
  g_log.initialized = 1;
}

void census_log_shutdown(void) {
  GPR_ASSERT(g_log.initialized);
  gpr_mu_destroy(&g_log.lock);
  gpr_free_aligned(g_log.core_local_blocks);
  g_log.core_local_blocks = NULL;
  gpr_free_aligned(g_log.blocks);
  g_log.blocks = NULL;
  gpr_free(g_log.buffer);
  g_log.buffer = NULL;
  g_log.initialized = 0;
}

void *census_log_start_write(size_t size) {
  /* Used to bound the number of times block allocation is attempted. */
  int32_t attempts_remaining = g_log.num_blocks;
  /* TODO(aveitch): move this inside the do loop when current_cpu is fixed */
  int32_t core_id = gpr_cpu_current_cpu();
  GPR_ASSERT(g_log.initialized);
  if (size > CENSUS_LOG_MAX_RECORD_SIZE) {
    return NULL;
  }
  do {
    int allocated;
    void *record = NULL;
    cl_block *block =
        cl_core_local_block_get_block(&g_log.core_local_blocks[core_id]);
    if (block && (record = cl_block_start_write(block, size))) {
      return record;
    }
    /* Need to allocate a new block. We are here if:
       - no block is associated with the core, OR
       - a write is in progress on the block, OR
       - the block is out of space. */
    if (gpr_atm_acq_load(&g_log.is_full)) {
      gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
      return NULL;
    }
    gpr_mu_lock(&g_log.lock);
    allocated = cl_allocate_core_local_block(core_id, block);
    gpr_mu_unlock(&g_log.lock);
    if (!allocated) {
      gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
      return NULL;
    }
  } while (attempts_remaining--);
  /* Give up. */
  gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
  return NULL;
}

void census_log_end_write(void *record, size_t bytes_written) {
  GPR_ASSERT(g_log.initialized);
  cl_block_end_write(cl_get_block(record), bytes_written);
}

void census_log_init_reader(void) {
  GPR_ASSERT(g_log.initialized);
  gpr_mu_lock(&g_log.lock);
  /* If a block is locked for reading, unlock it. */
  if (g_log.block_being_read != NULL) {
    cl_block_end_read(g_log.block_being_read);
    g_log.block_being_read = NULL;
  }
  g_log.read_iterator_state = g_log.num_cores;
  gpr_mu_unlock(&g_log.lock);
}

const void *census_log_read_next(size_t *bytes_available) {
  GPR_ASSERT(g_log.initialized);
  gpr_mu_lock(&g_log.lock);
  if (g_log.block_being_read != NULL) {
    cl_block_end_read(g_log.block_being_read);
  }
  do {
    g_log.block_being_read = cl_next_block_to_read(g_log.block_being_read);
    if (g_log.block_being_read != NULL) {
      void *record =
          cl_block_start_read(g_log.block_being_read, bytes_available);
      if (record != NULL) {
        gpr_mu_unlock(&g_log.lock);
        return record;
      }
    }
  } while (g_log.block_being_read != NULL);
  gpr_mu_unlock(&g_log.lock);
  return NULL;
}
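
/* Illustrative consumer loop (hypothetical caller): a full pass over the
   log pairs census_log_init_reader() with census_log_read_next() until the
   latter returns NULL:

     size_t len;
     const void *data;
     census_log_init_reader();
     while ((data = census_log_read_next(&len)) != NULL) {
       ... process len bytes starting at data ...
     }

   A record committed after the pass started may only show up in a later
   pass; blocks still attached to cores are read but never freed here. */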

size_t census_log_remaining_space(void) {
  size_t space;
  GPR_ASSERT(g_log.initialized);
  gpr_mu_lock(&g_log.lock);
  if (g_log.discard_old_records) {
    /* Remaining space is not meaningful; just return the entire log
       space. */
    space = g_log.num_blocks << CENSUS_LOG_2_MAX_RECORD_SIZE;
  } else {
    space = g_log.free_block_list.count * CENSUS_LOG_MAX_RECORD_SIZE;
  }
  gpr_mu_unlock(&g_log.lock);
  return space;
}

int census_log_out_of_space_count(void) {
  GPR_ASSERT(g_log.initialized);
  return gpr_atm_acq_load(&g_log.out_of_space_count);
}