OLD | NEW |
---|---|
(Empty) | |
1 // Copyright (c) 2015 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "base/memory/persistent_memory_allocator.h" | |
6 | |
7 #include <assert.h> | |
8 #include <algorithm> | |
9 | |
10 #include "base/logging.h" | |
11 #include "base/metrics/histogram_macros.h" | |
12 | |
13 // All integer constants in this file are signed because Atomic32 is signed | |
14 // and keeping all others consistent with this avoids a lot of unnecessary | |
15 // casting between signed and unsigned values just to avoid compiler errors. | |
16 // This means an occasional cast of a constant from sizeof() to "int", but | |
17 // that is far simpler than the alternative. Only the external interface uses | |
18 // size_t, for simplicity to the caller. | |
19 | |
20 namespace { | |
21 | |
22 // Required range of memory segment sizes. It has to fit in a signed 32-bit | |
23 // number and should be a power of 2 in order to accommodate almost any page | |
24 // size. | |
25 const int32_t kSegmentMinSize = 1 << 10; // 1 KiB | |
26 const int32_t kSegmentMaxSize = 1 << 30; // 1 GiB | |
27 | |
28 // All allocations and data-structures must be aligned to this byte boundary. | |
29 // Alignment as large as the physical bus between CPU and RAM is _required_ | |
30 // for some architectures, is simply more efficient on other CPUs, and | |
31 // generally a Good Idea(tm) for all platforms as it reduces/eliminates the | |
32 // chance that a type will span cache lines. Alignment mustn't be less | |
33 // than 8 to ensure proper alignment for all types. The rest is a balance | |
34 // between reducing spans across multiple cache lines and wasted space spent | |
35 // padding out allocations. An alignment of 16 would ensure that the block | |
36 // header structure always sits in a single cache line. An average of about | |
37 // 1/2 this value will be wasted with every allocation. | |
38 const int32_t kAllocAlignment = 8; | |
39 | |
40 // A constant (random) value placed in the shared metadata to identify | |
41 // an already initialized memory segment. | |
42 const int32_t kGlobalCookie = 0x408305DC; | |
43 | |
44 // The current version of the metadata. If updates are made that change | |
45 // the metadata, the version number can be queried to operate in a backward- | |
46 // compatible manner until the memory segment is completely re-initialized. | |
47 const int32_t kGlobalVersion = 1; | |
48 | |
49 // Constant values placed in the block headers to indicate its state. | |
50 const int32_t kBlockCookieFree = 0; | |
51 const int32_t kBlockCookieQueue = 1; | |
52 const int32_t kBlockCookieWasted = -1; | |
53 const int32_t kBlockCookieAllocated = 0xC8799269; | |
54 | |
55 // TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char> | |
56 // types rather than combined bitfield. | |
57 | |
58 // Flags stored in the flags_ field of the SharedMetaData structure below. | |
59 enum : int32_t { | |
60 kFlagCorrupt = 1 << 0, | |
61 kFlagFull = 1 << 1 | |
62 }; | |
63 | |
64 bool CheckFlag(base::subtle::Atomic32* flags, int flag) { | |
65 base::subtle::Atomic32 loaded_flags = base::subtle::Acquire_Load(flags); | |
66 return (loaded_flags & flag) != 0; | |
67 } | |
68 | |
69 void SetFlag(base::subtle::Atomic32* flags, int flag) { | |
70 for (;;) { | |
71 base::subtle::Atomic32 loaded_flags = base::subtle::Acquire_Load(flags); | |
72 base::subtle::Atomic32 new_flags = | |
73 (loaded_flags & ~flag) | flag; | |
74 if (base::subtle::Release_CompareAndSwap( | |
75 flags, loaded_flags, new_flags) == loaded_flags) { | |
76 break; | |
77 } | |
78 } | |
79 } | |
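
// A minimal sketch of the std::atomic<> form mentioned in the TODO above,
// shown only for illustration (it is not part of this file; the names and
// signatures below are hypothetical):
//
//   #include <atomic>
//
//   bool CheckFlag(const std::atomic<int32_t>* flags, int flag) {
//     return (flags->load(std::memory_order_acquire) & flag) != 0;
//   }
//
//   void SetFlag(std::atomic<int32_t>* flags, int flag) {
//     // fetch_or makes the CAS loop used above unnecessary.
//     flags->fetch_or(flag, std::memory_order_release);
//   }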
80 | |
81 } // namespace | |
82 | |
83 namespace base { | |
84 | |
85 // The block-header is placed at the top of every allocation within the | |
86 // segment to describe the data that follows it. | |
87 struct PersistentMemoryAllocator::BlockHeader { | |
88 int32_t size; // Number of bytes in this block, including header. | |
89 int32_t cookie; // Constant value indicating completed allocation. | |
90 uint32_t type_id; // A number provided by caller indicating data type. | |
91 subtle::Atomic32 next; // Pointer to the next block when iterating. | |
92 }; | |
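
// On the supported platforms each of these fields is 4 bytes (Atomic32 is a
// 32-bit type), so the header occupies 16 bytes: a multiple of the
// kAllocAlignment (8) that the static_assert in the constructor requires.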
93 | |
94 // The shared metadata exists once at the top of the memory segment to | |
95 // describe the state of the allocator to all processes. | |
96 struct PersistentMemoryAllocator::SharedMetadata { | |
97 int32_t cookie; // Some value that indicates complete initialization. | |
98 int32_t size; // Total size of memory segment. | |
99 int32_t page_size; // Paging size within memory segment. | |
100 int32_t version; // Version code so upgrades don't break. | |
101 subtle::Atomic32 freeptr; // Offset/ref to first free space in the segment. | |
102 subtle::Atomic32 flags; // Bitfield of information flags. | |
103 int32_t name; // Reference to stored name string. | |
104 | |
105 // The "iterable" queue is an M&S Queue as described here, append-only: | |
106 // https://www.research.ibm.com/people/m/michael/podc-1996.pdf | |
107 subtle::Atomic32 tailptr; // Last block available for iteration. | |
108 BlockHeader queue; // Empty block for linked-list head/tail. (must be last) | |
109 }; | |
110 | |
111 // The "queue" block header is used to detect "last node" so that zero/null | |
112 // can be used to indicate that it hasn't been added at all. It is part of | |
113 // the SharedMetadata structure which itself is always located at offset zero. | |
114 const PersistentMemoryAllocator::Reference | |
115 PersistentMemoryAllocator::kReferenceQueue = | |
116 offsetof(SharedMetadata, queue); | |
117 const PersistentMemoryAllocator::Reference | |
118 PersistentMemoryAllocator::kReferenceNull = 0; | |
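
// Given the eight 4-byte fields that precede "queue" in SharedMetadata, the
// offsetof() above evaluates to 32 on the supported platforms, so
// kReferenceQueue is itself a multiple of kAllocAlignment and passes the
// alignment check that GetBlock() applies to every reference.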
119 | |
120 PersistentMemoryAllocator::PersistentMemoryAllocator(void* base, | |
121 size_t size, | |
122 size_t page_size, | |
123 const std::string& name) | |
124 : mem_base_(static_cast<char*>(base)), | |
125 mem_size_(static_cast<int32_t>(size)), | |
126 mem_page_(static_cast<int32_t>((page_size ? page_size : size))), | |
127 corrupted_(0), | |
128 allocs_histogram_(nullptr), | |
129 used_histogram_(nullptr) { | |
130 static_assert(sizeof(BlockHeader) % kAllocAlignment == 0, | |
131 "BlockHeader is not a multiple of kAllocAlignment"); | |
132 static_assert(sizeof(SharedMetadata) % kAllocAlignment == 0, | |
133 "SharedMetadata is not a multiple of kAllocAlignment"); | |
134 | |
135 CHECK(base && reinterpret_cast<uintptr_t>(base) % kAllocAlignment == 0); | |
136 CHECK(size >= kSegmentMinSize && size <= kSegmentMaxSize && | |
137 size % kAllocAlignment == 0); | |
138 CHECK(page_size == 0 || size % page_size == 0); | |
139 | |
140 if (shared_meta()->cookie != kGlobalCookie) { | |
141 // This block is only executed when a completely new memory segment is | |
142 // being initialized. It's unshared and single-threaded... | |
143 const BlockHeader* first_block = reinterpret_cast<BlockHeader*>( | |
144 mem_base_ + sizeof(SharedMetadata)); | |
145 if (shared_meta()->cookie != 0 || | |
146 shared_meta()->size != 0 || | |
147 shared_meta()->version != 0 || | |
148 subtle::NoBarrier_Load(&shared_meta()->freeptr) != 0 || | |
149 subtle::NoBarrier_Load(&shared_meta()->flags) != 0 || | |
150 shared_meta()->name != 0 || | |
151 shared_meta()->tailptr != 0 || | |
152 shared_meta()->queue.cookie != 0 || | |
153 subtle::NoBarrier_Load(&shared_meta()->queue.next) != 0 || | |
154 first_block->size != 0 || | |
155 first_block->cookie != 0 || | |
156 first_block->type_id != 0 || | |
157 first_block->next != 0) { | |
158 // ...or something malicious has been playing with the metadata. | |
159 NOTREACHED(); | |
160 SetCorrupt(); | |
161 } | |
162 | |
163 // This is still safe to do even if corruption has been detected. | |
164 shared_meta()->cookie = kGlobalCookie; | |
165 shared_meta()->size = mem_size_; | |
166 shared_meta()->page_size = mem_page_; | |
167 shared_meta()->version = kGlobalVersion; | |
168 subtle::NoBarrier_Store(&shared_meta()->freeptr, sizeof(SharedMetadata)); | |
169 | |
170 // Set up the queue of iterable allocations. | |
171 shared_meta()->queue.size = sizeof(BlockHeader); | |
172 shared_meta()->queue.cookie = kBlockCookieQueue; | |
173 subtle::NoBarrier_Store(&shared_meta()->queue.next, kReferenceQueue); | |
174 subtle::NoBarrier_Store(&shared_meta()->tailptr, kReferenceQueue); | |
175 | |
176 // Allocate space for the name so other processes can learn it. | |
177 if (!name.empty()) { | |
178 const size_t name_length = name.length() + 1; | |
179 shared_meta()->name = Allocate(name_length, 0); | |
180 char* name_cstr = GetAsObject<char>(shared_meta()->name, 0); | |
181 if (name_cstr) | |
182 strcpy(name_cstr, name.c_str()); | |
183 } | |
184 } else { | |
185 // The allocator is attaching to a previously initialized segment of | |
186 // memory. Make sure the embedded data matches what has been passed. | |
187 if (shared_meta()->size != mem_size_ || | |
188 shared_meta()->page_size != mem_page_) { | |
189 NOTREACHED(); | |
190 SetCorrupt(); | |
191 } | |
192 } | |
193 | |
194 // Metrics are created here so there is no recursion from Allocate | |
195 // trying to update a histogram that needs to be created, which would | |
196 // in turn call Allocate again. | |
197 // Some metrics are only active on the primary owner. | |
198 if (!name.empty()) { | |
199 used_histogram_ = Histogram::FactoryGet( | |
200 name + ".UsedKiB", 1, 256 << 10, 100, HistogramBase::kNoFlags); | |
201 } | |
202 | |
203 // Other metrics are active on all users of the memory segment. | |
204 Reference name_ref = shared_meta()->name; | |
205 char* name_cstr = GetAsObject<char>(name_ref, 0); | |
206 if (name_cstr) { | |
207 size_t name_length = GetAllocSize(name_ref); | |
208 while (name_length > 0 && name_cstr[name_length - 1] != '\0') | |
209 --name_length; | |
210 if (name_length > 0) { | |
211 std::string shared_name(name_cstr, name_length); | |
212 allocs_histogram_ = Histogram::FactoryGet( | |
213 shared_name + ".Allocs", 1, 10000, 50, HistogramBase::kNoFlags); | |
214 } | |
215 } | |
216 } | |
217 | |
218 PersistentMemoryAllocator::~PersistentMemoryAllocator() {} | |
219 | |
220 size_t PersistentMemoryAllocator::GetAllocSize(Reference ref) { | |
221 BlockHeader* block = GetBlock(ref, 0, 0, false, false); | |
222 if (!block) | |
223 return 0; | |
224 int32_t size = block->size; | |
225 // Header was verified by GetBlock() but a malicious actor could change | |
226 // the value between there and here. Check it again. | |
227 if (size <= (int)sizeof(BlockHeader) || ref + size >= mem_size_) | |
228 return 0; | |
229 return static_cast<size_t>(size - sizeof(BlockHeader)); | |
230 } | |
231 | |
232 int32_t PersistentMemoryAllocator::Allocate(size_t usize, uint32_t type_id) { | |
233 // Round up the requested size, plus header, to the next allocation alignment. | |
234 int32_t size = static_cast<int32_t>(usize + sizeof(BlockHeader)); | |
Alexander Potapenko (2015/11/16 16:01:51):
Can "usize + sizeof(BlockHeader)" overflow int32_t

bcwhite (2015/11/17 01:35:08):
Yes, but then... umm, yeah. Right. I used to hav

bcwhite (2015/11/17 02:39:26):
Oh, wait. There is a "usize > int32::max" as well

Alexander Potapenko (2015/11/17 10:39:15):
Consider usize = int32::max - 1.
In the case sizeo
235 size = (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1); | |
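// Worked example of the rounding above, assuming a 16-byte BlockHeader
// (four 4-byte fields) and kAllocAlignment == 8: a request of usize == 13
// gives size == 13 + 16 == 29, which rounds up to (29 + 7) & ~7 == 32. The
// block then occupies 32 bytes: the 16-byte header, 13 bytes of data and
// 3 bytes of alignment padding.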
236 if (usize > (size_t)std::numeric_limits<int32_t>::max() || | |
237 size <= (int)sizeof(BlockHeader) || size > mem_page_) { | |
238 NOTREACHED(); | |
239 return kReferenceNull; | |
240 } | |
241 | |
242 // Allocation is lockless so we do all our calculation and then, if saving | |
243 // indicates a change has occurred since we started, scrap everything and | |
244 // start over. | |
245 for (;;) { | |
246 if (IsCorrupt()) | |
247 return kReferenceNull; | |
248 | |
249 // Get the current start of unallocated memory. Other threads may | |
250 // update this at any time and cause us to retry these operations. | |
251 const int32_t freeptr = subtle::NoBarrier_Load(&shared_meta()->freeptr); | |
252 if (freeptr + size > mem_size_) { | |
253 SetFlag(&shared_meta()->flags, kFlagFull); | |
254 return kReferenceNull; | |
255 } | |
256 | |
257 // Get pointer to the "free" block. It doesn't even have a header; pass | |
258 // -sizeof(header) so accounting for that will yield an expected size of | |
259 // zero which is what will be stored at that location. If something | |
260 // has been allocated since the load of freeptr above, it is still safe | |
261 // as nothing will be written to that location until after the CAS below. | |
262 BlockHeader* const block = GetBlock(freeptr, 0, 0, false, true); | |
263 if (!block) { | |
264 SetCorrupt(); | |
265 return kReferenceNull; | |
266 } | |
267 | |
268 // An allocation cannot cross page boundaries. If it would, create a | |
269 // "wasted" block and begin again at the top of the next page. This | |
270 // area could just be left empty but we fill in the block header just | |
271 // for completeness' sake. | |
272 const int32_t page_free = mem_page_ - freeptr % mem_page_; | |
273 if (size > page_free) { | |
274 if (page_free <= (int)sizeof(BlockHeader)) { | |
275 SetCorrupt(); | |
276 return kReferenceNull; | |
277 } | |
278 const int32_t new_freeptr = freeptr + page_free; | |
279 if (subtle::NoBarrier_CompareAndSwap( | |
280 &shared_meta()->freeptr, freeptr, new_freeptr) == freeptr) { | |
281 block->size = page_free; | |
282 block->cookie = kBlockCookieWasted; | |
283 } | |
284 continue; | |
285 } | |
286 | |
287 // Don't leave a slice at the end of a page too small for anything. This | |
288 // can result in an allocation up to two alignment-sizes greater than the | |
289 // minimum required by requested-size + header + alignment. | |
290 if (page_free - size < (int)(sizeof(BlockHeader) + kAllocAlignment)) | |
291 size = page_free; | |
292 | |
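// Two worked examples of the page-boundary handling above, assuming a
// 16-byte BlockHeader, kAllocAlignment == 8 and mem_page_ == 4096 (the
// numbers are illustrative only): with freeptr == 4064 and size == 64,
// page_free is 32, so a 32-byte "wasted" block is recorded and the loop
// retries from the start of the next page. With page_free == 80 and
// size == 64, the 16 bytes that would remain are smaller than a header plus
// alignment (16 + 8), so the allocation is grown to 80 bytes and the slice
// is absorbed rather than stranded.
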
293 const int32_t new_freeptr = freeptr + size; | |
294 if (new_freeptr > mem_size_) { | |
295 SetCorrupt(); | |
296 return kReferenceNull; | |
297 } | |
298 | |
299 if (subtle::NoBarrier_CompareAndSwap( | |
300 &shared_meta()->freeptr, freeptr, new_freeptr) != freeptr) { | |
301 // Another thread must have completed an allocation while we were working. | |
302 // Try again. | |
303 continue; | |
304 } | |
305 | |
306 // Record this allocation in usage stats (if active). This is safe | |
307 // to call at this point because the allocation is complete. | |
308 if (allocs_histogram_) | |
309 allocs_histogram_->Add(static_cast<HistogramBase::Sample>(usize)); | |
310 | |
311 // Given that all memory was zeroed before ever being given to an instance | |
312 // of this class and given that we only allocate in a monotonic fashion | |
313 // going forward, it must be that the newly allocated block is completely | |
314 // full of zeros. If we find anything in the block header that is NOT a | |
315 // zero then something must have previously run amok through memory, | |
316 // writing beyond the allocated space and into unallocated space. | |
317 if (block->size != 0 || | |
318 block->cookie != kBlockCookieFree || | |
319 block->type_id != 0 || | |
320 subtle::NoBarrier_Load(&block->next) != 0) { | |
321 SetCorrupt(); | |
322 return kReferenceNull; | |
323 } | |
324 | |
325 block->size = size; | |
326 block->cookie = kBlockCookieAllocated; | |
327 block->type_id = type_id; | |
328 return freeptr; | |
329 } | |
330 } | |
331 | |
332 void PersistentMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) { | |
333 int32_t remaining = | |
334 mem_size_ - subtle::NoBarrier_Load(&shared_meta()->freeptr); | |
335 meminfo->total = mem_size_; | |
336 meminfo->free = IsCorrupt() ? 0 : remaining - sizeof(BlockHeader); | |
337 } | |
338 | |
339 void PersistentMemoryAllocator::MakeIterable(Reference ref) { | |
340 if (IsCorrupt()) | |
341 return; | |
342 BlockHeader* block = GetBlock(ref, 0, 0, false, false); | |
343 if (!block) // invalid reference | |
344 return; | |
345 if (subtle::Acquire_Load(&block->next) != 0) // previously set iterable | |
346 return; | |
347 subtle::Release_Store(&block->next, kReferenceQueue); // will be tail block | |
348 | |
349 // Try to add this block to the tail of the queue. May take multiple tries. | |
350 int32_t tail; | |
351 for (;;) { | |
352 // Acquire the current tail-pointer released by a previous call to this | |
353 // method and validate it. | |
354 tail = subtle::Acquire_Load(&shared_meta()->tailptr); | |
355 block = GetBlock(tail, 0, 0, true, false); | |
356 if (!block) { | |
357 SetCorrupt(); | |
358 return; | |
359 } | |
360 | |
361 // Try to insert the block at the tail of the queue. The tail node always | |
362 // has an existing value of kReferenceQueue; if that is not the value | |
363 // returned, another thread has acted in the meantime. | |
364 int32_t next = subtle::Release_CompareAndSwap( | |
365 &block->next, kReferenceQueue, ref); | |
366 if (next == kReferenceQueue) { | |
367 // Update the tail pointer to the new offset. If the "else" clause did | |
368 // not exist, then this could be a simple Release_Store to set the new | |
369 // value but because it does, it's possible that other threads could add | |
370 // one or more nodes at the tail before reaching this point. We don't | |
371 // have to check the return value because it either operates correctly | |
372 // or the exact same operation has already been done (by the "else" | |
373 // clause). | |
374 subtle::Release_CompareAndSwap(&shared_meta()->tailptr, tail, ref); | |
375 return; | |
376 } else { | |
377 // In the unlikely case that a thread crashed or was killed between the | |
378 // update of "next" and the update of "tailptr", it is necessary to | |
379 // perform the operation that would have been done. There's no explicit | |
380 // check for crash/kill which means that this operation may also happen | |
381 // even when the other thread is in perfect working order which is what | |
382 // necessitates the CompareAndSwap above. | |
383 subtle::Release_CompareAndSwap(&shared_meta()->tailptr, tail, next); | |
384 } | |
385 } | |
386 } | |
387 | |
388 void PersistentMemoryAllocator::CreateIterator(Iterator* state) { | |
389 state->last = kReferenceQueue; | |
390 state->niter = 0; | |
391 } | |
392 | |
393 int32_t PersistentMemoryAllocator::GetNextIterable(Iterator* state, | |
394 uint32_t* type_id) { | |
395 const BlockHeader* block = GetBlock(state->last, 0, 0, true, false); | |
396 if (!block) // invalid iterator state | |
397 return kReferenceNull; | |
398 | |
399 // The compiler and CPU can freely reorder all memory accesses on which | |
400 // there are no dependencies. It could, for example, move the load of | |
401 // "freeptr" above this point because there are no explicit dependencies | |
402 // between it and "next". If it did, however, then another block could | |
403 // be queued after that but before the following load meaning there is | |
404 // one more queued block than the future "detect loop by having more | |
405 // blocks that could fit before freeptr" will allow. | |
406 // | |
407 // By "acquiring" the "next" value here, it's synchronized to the enqueue | |
408 // of the node which in turn is synchronized to the allocation (which sets | |
409 // freeptr). Thus, the scenario above cannot happen. | |
410 int32_t next = subtle::Acquire_Load(&block->next); | |
411 block = GetBlock(next, 0, 0, false, false); | |
412 if (!block) // no next allocation in queue | |
413 return kReferenceNull; | |
414 | |
415 // Memory corruption could cause a loop in the list. We need to detect | |
416 // that so as to not cause an infinite loop in the caller. We do this | |
417 // simply by making sure we don't iterate more than the absolute maximum | |
418 // number of allocations that could have been made. Callers are likely | |
419 // to loop multiple times before it is detected but at least it stops. | |
420 int32_t freeptr = std::min(subtle::Acquire_Load(&shared_meta()->freeptr), | |
421 mem_size_); | |
422 if (state->niter > freeptr / (sizeof(BlockHeader) + kAllocAlignment)) { | |
423 SetCorrupt(); | |
424 return kReferenceNull; | |
425 } | |
426 | |
427 state->last = next; | |
428 state->niter++; | |
429 *type_id = block->type_id; | |
430 | |
431 return next; | |
432 } | |
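
// A minimal usage sketch of the interface implemented above (illustration
// only, not part of this file; MyData, kMyDataType and the 64 KiB buffer are
// hypothetical):
//
//   struct MyData { int32_t value; };     // caller-defined payload
//   const uint32_t kMyDataType = 42;      // caller-chosen type identifier
//
//   std::vector<char> buffer(64 << 10);   // zeroed, suitably aligned storage
//   base::PersistentMemoryAllocator allocator(
//       buffer.data(), buffer.size(), 0, "ExampleAllocator");
//
//   int32_t ref = allocator.Allocate(sizeof(MyData), kMyDataType);
//   MyData* data = allocator.GetAsObject<MyData>(ref, kMyDataType);
//   if (data) {
//     data->value = 123;
//     allocator.MakeIterable(ref);        // publish the block to iterators
//   }
//
//   base::PersistentMemoryAllocator::Iterator iter;
//   allocator.CreateIterator(&iter);
//   uint32_t type_id = 0;
//   for (int32_t r = allocator.GetNextIterable(&iter, &type_id); r != 0;
//        r = allocator.GetNextIterable(&iter, &type_id)) {
//     // The block made iterable above is returned here with its type_id.
//   }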
433 | |
434 // The "corrupted" state is held both locally and globally (shared). The | |
435 // shared flag can't be trusted since a malicious actor could overwrite it. | |
436 // The local version is immune to foreign actors. Thus, if seen shared, | |
437 // copy it locally and, once known, always restore it globally. | |
438 void PersistentMemoryAllocator::SetCorrupt() { | |
439 LOG(ERROR) << "Corruption detected in shared-memory segment."; | |
440 subtle::NoBarrier_Store(&corrupted_, 1); | |
441 SetFlag(&shared_meta()->flags, kFlagCorrupt); | |
442 } | |
443 | |
444 bool PersistentMemoryAllocator::IsCorrupt() { | |
445 if (subtle::NoBarrier_Load(&corrupted_) || | |
446 CheckFlag(&shared_meta()->flags, kFlagCorrupt)) { | |
447 SetCorrupt(); // Make sure all indicators are set. | |
448 return true; | |
449 } | |
450 return false; | |
451 } | |
452 | |
453 bool PersistentMemoryAllocator::IsFull() { | |
454 return CheckFlag(&shared_meta()->flags, kFlagFull); | |
455 } | |
456 | |
457 // Dereference a block |ref| and ensure that it's valid for the desired | |
458 // |type_id| and |size|. |queue_ok| and |free_ok| indicate that we may access | |
459 // block headers not normally available to callers, i.e. the queue block and | |
460 // not-yet-allocated space, but still accessed by this module. By having | |
461 // internal dereferences go through this same function, the allocator is | |
462 // hardened against corruption. | |
462 PersistentMemoryAllocator::BlockHeader* PersistentMemoryAllocator::GetBlock( | |
463 Reference ref, | |
464 uint32_t type_id, | |
465 int32_t size, | |
466 bool queue_ok, | |
467 bool free_ok) { | |
468 // Validation of parameters. | |
469 if (ref % kAllocAlignment != 0) | |
470 return nullptr; | |
471 if (ref < (int)(queue_ok ? kReferenceQueue : sizeof(SharedMetadata))) | |
472 return nullptr; | |
473 size += sizeof(BlockHeader); | |
474 if (ref + size > mem_size_) | |
475 return nullptr; | |
476 | |
477 // Validation of referenced block-header. | |
478 if (!free_ok) { | |
479 int32_t freeptr = subtle::NoBarrier_Load(&shared_meta()->freeptr); | |
480 if (ref + size > freeptr) | |
481 return nullptr; | |
482 const BlockHeader* block = | |
483 reinterpret_cast<BlockHeader*>(mem_base_ + ref); | |
484 if (block->size < size) | |
485 return nullptr; | |
486 if (ref != kReferenceQueue && block->cookie != kBlockCookieAllocated) | |
487 return nullptr; | |
488 if (type_id != 0 && block->type_id != type_id) | |
489 return nullptr; | |
490 } | |
491 | |
492 // Return pointer to block data. | |
493 return reinterpret_cast<BlockHeader*>(mem_base_ + ref); | |
494 } | |
495 | |
496 void* PersistentMemoryAllocator::GetBlockData(Reference ref, | |
497 uint32_t type_id, | |
498 int32_t size) { | |
499 DCHECK(size > 0); | |
500 BlockHeader* block = GetBlock(ref, type_id, size, false, false); | |
501 if (!block) | |
502 return nullptr; | |
503 return reinterpret_cast<char*>(block) + sizeof(BlockHeader); | |
504 } | |
505 | |
506 void PersistentMemoryAllocator::UpdateStaticHistograms() { | |
507 if (used_histogram_) { | |
508 MemoryInfo meminfo; | |
509 GetMemoryInfo(&meminfo); | |
510 HistogramBase::Sample usedkb = static_cast<HistogramBase::Sample>( | |
511 (meminfo.total - meminfo.free) >> 10); | |
512 used_histogram_->Add(usedkb); | |
513 } | |
514 } | |
515 | |
516 } // namespace base | |