OLD | NEW |
---|---|
1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/common/discardable_shared_memory_heap.h" | 5 #include "content/common/discardable_shared_memory_heap.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 | 8 |
9 #include "base/format_macros.h" | 9 #include "base/format_macros.h" |
10 #include "base/memory/discardable_shared_memory.h" | 10 #include "base/memory/discardable_shared_memory.h" |
(...skipping 260 matching lines...)
271 } | 271 } |
272 | 272 |
273 scoped_ptr<DiscardableSharedMemoryHeap::Span> | 273 scoped_ptr<DiscardableSharedMemoryHeap::Span> |
274 DiscardableSharedMemoryHeap::Carve(Span* span, size_t blocks) { | 274 DiscardableSharedMemoryHeap::Carve(Span* span, size_t blocks) { |
275 scoped_ptr<Span> serving = RemoveFromFreeList(span); | 275 scoped_ptr<Span> serving = RemoveFromFreeList(span); |
276 | 276 |
277 const int extra = serving->length_ - blocks; | 277 const int extra = serving->length_ - blocks; |
278 if (extra) { | 278 if (extra) { |
279 scoped_ptr<Span> leftover( | 279 scoped_ptr<Span> leftover( |
280 new Span(serving->shared_memory_, serving->start_ + blocks, extra)); | 280 new Span(serving->shared_memory_, serving->start_ + blocks, extra)); |
| 281 leftover->set_is_locked(false); |
281 DCHECK_IMPLIES(extra > 1, spans_.find(leftover->start_) == spans_.end()); | 282 DCHECK_IMPLIES(extra > 1, spans_.find(leftover->start_) == spans_.end()); |
282 RegisterSpan(leftover.get()); | 283 RegisterSpan(leftover.get()); |
283 | 284 |
284 // No need to coalesce as the previous span of |leftover| was just split | 285 // No need to coalesce as the previous span of |leftover| was just split |
285 // and the next span of |leftover| was not previously coalesced with | 286 // and the next span of |leftover| was not previously coalesced with |
286 // |span|. | 287 // |span|. |
287 InsertIntoFreeList(leftover.Pass()); | 288 InsertIntoFreeList(leftover.Pass()); |
288 | 289 |
289 serving->length_ = blocks; | 290 serving->length_ = blocks; |
290 spans_[serving->start_ + blocks - 1] = serving.get(); | 291 spans_[serving->start_ + blocks - 1] = serving.get(); |
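A note on the bookkeeping above: |spans_| maps both the first and the last block of each span to its Span*, so after carving, |serving|'s new last block must be re-registered or later coalescing lookups would miss it. A minimal standalone sketch of that invariant, using simplified stand-ins for Span and |spans_| (all names here are illustrative, not the real class):

#include <cstddef>
#include <map>

// Simplified stand-in for DiscardableSharedMemoryHeap::Span.
struct Span {
  size_t start;   // first block index in the segment
  size_t length;  // number of blocks
};

int main() {
  std::map<size_t, Span*> spans;  // stand-in for |spans_|

  // A free span covering blocks [100, 108): register first and last block.
  Span serving{100, 8};
  spans[serving.start] = &serving;
  spans[serving.start + serving.length - 1] = &serving;

  // Carve off 3 blocks; the leftover covers blocks [103, 108).
  const size_t blocks = 3;
  Span leftover{serving.start + blocks, serving.length - blocks};
  spans[leftover.start] = &leftover;
  spans[leftover.start + leftover.length - 1] = &leftover;

  // Shrink |serving| and re-register its new last block so neighbor
  // lookups during later coalescing still resolve to it.
  serving.length = blocks;
  spans[serving.start + blocks - 1] = &serving;
  return 0;
}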
(...skipping 69 matching lines...)
360 } | 361 } |
361 } | 362 } |
362 | 363 |
363 void DiscardableSharedMemoryHeap::OnMemoryDump( | 364 void DiscardableSharedMemoryHeap::OnMemoryDump( |
364 const base::DiscardableSharedMemory* shared_memory, | 365 const base::DiscardableSharedMemory* shared_memory, |
365 size_t size, | 366 size_t size, |
366 int32_t segment_id, | 367 int32_t segment_id, |
367 base::trace_event::ProcessMemoryDump* pmd) { | 368 base::trace_event::ProcessMemoryDump* pmd) { |
368 size_t allocated_objects_count = 0; | 369 size_t allocated_objects_count = 0; |
369 size_t allocated_objects_size_in_bytes = 0; | 370 size_t allocated_objects_size_in_bytes = 0; |
| 371 size_t locked_size_in_bytes = 0; |
reveman (2015/10/01 10:34:39):
nit: locked_objects_size_in_bytes to be consistent with allocated_objects_size_in_bytes.

ssid (2015/10/01 14:08:46):
Done.
370 size_t offset = | 372 size_t offset = |
371 reinterpret_cast<size_t>(shared_memory->memory()) / block_size_; | 373 reinterpret_cast<size_t>(shared_memory->memory()) / block_size_; |
372 size_t end = offset + size / block_size_; | 374 size_t end = offset + size / block_size_; |
373 while (offset < end) { | 375 while (offset < end) { |
374 Span* span = spans_[offset]; | 376 Span* span = spans_[offset]; |
375 if (!IsInFreeList(span)) { | 377 if (!IsInFreeList(span)) { |
| 378 const size_t span_size = span->length_ * block_size_; |
reveman (2015/10/01 10:34:39):
How about computing allocated_objects_size_in_blocks [...]

ssid (2015/10/01 14:08:46):
Done.
| 379 allocated_objects_size_in_bytes += span_size; |
| 380 locked_size_in_bytes += span->is_locked_ ? span_size : 0; |
376 allocated_objects_count++; | 381 allocated_objects_count++; |
377 allocated_objects_size_in_bytes += span->length_ * block_size_; | |
378 } | 382 } |
379 offset += span->length_; | 383 offset += span->length_; |
380 } | 384 } |
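reveman's (truncated) suggestion above reads as "accumulate block counts inside the loop and convert to bytes once at the end", and ssid replied "Done", so a later patchset presumably took roughly this shape. A standalone sketch under that assumption; the struct and variable names are illustrative, not the committed code:

#include <cstddef>
#include <cstdio>
#include <vector>

struct SpanInfo {
  size_t length;  // in blocks
  bool free;
  bool locked;
};

int main() {
  const size_t block_size = 4096;
  std::vector<SpanInfo> spans = {{4, false, true},
                                 {2, true, false},
                                 {8, false, false}};

  size_t allocated_count = 0;
  size_t allocated_blocks = 0;
  size_t locked_blocks = 0;
  for (const SpanInfo& span : spans) {
    if (!span.free) {
      allocated_count++;
      allocated_blocks += span.length;
      if (span.locked)
        locked_blocks += span.length;  // locked spans are a subset
    }
  }

  // Convert to bytes once, instead of multiplying inside the loop.
  std::printf("allocated: %zu objects, %zu bytes (%zu locked)\n",
              allocated_count, allocated_blocks * block_size,
              locked_blocks * block_size);
  return 0;
}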
381 | 385 |
382 std::string segment_dump_name = | 386 std::string segment_dump_name = |
383 base::StringPrintf("discardable/segment_%d", segment_id); | 387 base::StringPrintf("discardable/segment_%d", segment_id); |
384 base::trace_event::MemoryAllocatorDump* segment_dump = | 388 base::trace_event::MemoryAllocatorDump* segment_dump = |
385 pmd->CreateAllocatorDump(segment_dump_name); | 389 pmd->CreateAllocatorDump(segment_dump_name); |
386 segment_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize, | 390 segment_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize, |
387 base::trace_event::MemoryAllocatorDump::kUnitsBytes, | 391 base::trace_event::MemoryAllocatorDump::kUnitsBytes, |
388 static_cast<uint64_t>(size)); | 392 static_cast<uint64_t>(size)); |
389 | 393 |
390 base::trace_event::MemoryAllocatorDump* obj_dump = | 394 base::trace_event::MemoryAllocatorDump* obj_dump = |
391 pmd->CreateAllocatorDump(segment_dump_name + "/allocated_objects"); | 395 pmd->CreateAllocatorDump(segment_dump_name + "/allocated_objects"); |
392 obj_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameObjectCount, | 396 obj_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameObjectCount, |
393 base::trace_event::MemoryAllocatorDump::kUnitsObjects, | 397 base::trace_event::MemoryAllocatorDump::kUnitsObjects, |
394 static_cast<uint64_t>(allocated_objects_count)); | 398 static_cast<uint64_t>(allocated_objects_count)); |
395 obj_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize, | 399 obj_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize, |
396 base::trace_event::MemoryAllocatorDump::kUnitsBytes, | 400 base::trace_event::MemoryAllocatorDump::kUnitsBytes, |
397 static_cast<uint64_t>(allocated_objects_size_in_bytes)); | 401 static_cast<uint64_t>(allocated_objects_size_in_bytes)); |
| 402 obj_dump->AddScalar("locked_size", |
| 403 base::trace_event::MemoryAllocatorDump::kUnitsBytes, |
| 404 locked_size_in_bytes); |
reveman (2015/10/01 10:34:39):
why no static_cast<uint64_t> here but we have one above?

ssid (2015/10/01 14:08:46):
I think initially it was added since the AddScalar [...]
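For context on the cast exchange: MemoryAllocatorDump::AddScalar takes its value parameter as a 64-bit unsigned integer, so a size_t argument widens implicitly on common 64-bit targets and the explicit static_cast<uint64_t> is a consistency/intent marker rather than a correctness fix. A minimal standalone analogue (this AddScalar is a stand-in, not the real trace_event method):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Stand-in with the same value type as the real AddScalar.
void AddScalar(const char* name, const char* units, uint64_t value) {
  std::printf("%s = %llu %s\n", name,
              static_cast<unsigned long long>(value), units);
}

int main() {
  size_t locked_size_in_bytes = 8192;
  // Both calls are equivalent on a 64-bit target; the cast only
  // documents the intended widening.
  AddScalar("locked_size", "bytes", locked_size_in_bytes);
  AddScalar("locked_size", "bytes",
            static_cast<uint64_t>(locked_size_in_bytes));
  return 0;
}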
398 | 405 |
399 // Emit an ownership edge towards a global allocator dump node. This allows | 406 // Emit an ownership edge towards a global allocator dump node. This allows |
400 // to avoid double-counting segments when both browser and child process emit | 407 // to avoid double-counting segments when both browser and child process emit |
401 // them. In the special case of single-process-mode, this will be the only | 408 // them. In the special case of single-process-mode, this will be the only |
402 // dumper active and the single ownership edge will become a no-op in the UI. | 409 // dumper active and the single ownership edge will become a no-op in the UI. |
403 const uint64 tracing_process_id = | 410 const uint64 tracing_process_id = |
404 base::trace_event::MemoryDumpManager::GetInstance() | 411 base::trace_event::MemoryDumpManager::GetInstance() |
405 ->GetTracingProcessId(); | 412 ->GetTracingProcessId(); |
406 base::trace_event::MemoryAllocatorDumpGuid shared_segment_guid = | 413 base::trace_event::MemoryAllocatorDumpGuid shared_segment_guid = |
407 GetSegmentGUIDForTracing(tracing_process_id, segment_id); | 414 GetSegmentGUIDForTracing(tracing_process_id, segment_id); |
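The lines elided just below presumably create the global dump node and the ownership edge itself; with the trace_event API of this era, the pattern typically looks like the following fragment, continuing the function's local variables (a sketch, not the skipped code; kImportance is an assumed value):

// Create (or attach to) the cross-process dump node for this segment,
// then mark this process's dump as one owner of it. Whichever process
// emits the higher importance wins attribution of the shared bytes.
const int kImportance = 2;  // assumed for illustration
pmd->CreateSharedGlobalAllocatorDump(shared_segment_guid);
pmd->AddOwnershipEdge(segment_dump->guid(), shared_segment_guid,
                      kImportance);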
(...skipping 29 matching lines...)
437 ScopedVector<ScopedMemorySegment>::const_iterator it = | 444 ScopedVector<ScopedMemorySegment>::const_iterator it = |
438 std::find_if(memory_segments_.begin(), memory_segments_.end(), | 445 std::find_if(memory_segments_.begin(), memory_segments_.end(), |
439 [span](const ScopedMemorySegment* segment) { | 446 [span](const ScopedMemorySegment* segment) { |
440 return segment->ContainsSpan(span); | 447 return segment->ContainsSpan(span); |
441 }); | 448 }); |
442 DCHECK(it != memory_segments_.end()); | 449 DCHECK(it != memory_segments_.end()); |
443 return (*it)->CreateMemoryAllocatorDump(span, block_size_, name, pmd); | 450 return (*it)->CreateMemoryAllocatorDump(span, block_size_, name, pmd); |
444 } | 451 } |
445 | 452 |
446 } // namespace content | 453 } // namespace content |