Chromium Code Reviews

Unified Diff: content/common/discardable_shared_memory_heap.cc

Issue 1407483003: Reland of [tracing] Display the locked size of discardable memory segment. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Fix. Created 5 years, 2 months ago
 // Copyright 2014 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "content/common/discardable_shared_memory_heap.h"

 #include <algorithm>

 #include "base/format_macros.h"
 #include "base/memory/discardable_shared_memory.h"
(...skipping 10 matching lines...)
 bool IsInFreeList(DiscardableSharedMemoryHeap::Span* span) {
   return span->previous() || span->next();
 }

 }  // namespace

 DiscardableSharedMemoryHeap::Span::Span(
     base::DiscardableSharedMemory* shared_memory,
     size_t start,
     size_t length)
-    : shared_memory_(shared_memory), start_(start), length_(length) {
-}
+    : shared_memory_(shared_memory),
+      start_(start),
+      length_(length),
+      is_locked_(false) {}

 DiscardableSharedMemoryHeap::Span::~Span() {
 }

 DiscardableSharedMemoryHeap::ScopedMemorySegment::ScopedMemorySegment(
     DiscardableSharedMemoryHeap* heap,
     scoped_ptr<base::DiscardableSharedMemory> shared_memory,
     size_t size,
     int32_t id,
     const base::Closure& deleted_callback)
(...skipping 228 matching lines...)
 }

 scoped_ptr<DiscardableSharedMemoryHeap::Span>
 DiscardableSharedMemoryHeap::Carve(Span* span, size_t blocks) {
   scoped_ptr<Span> serving = RemoveFromFreeList(span);

   const int extra = serving->length_ - blocks;
   if (extra) {
     scoped_ptr<Span> leftover(
         new Span(serving->shared_memory_, serving->start_ + blocks, extra));
+    leftover->set_is_locked(false);
     DCHECK_IMPLIES(extra > 1, spans_.find(leftover->start_) == spans_.end());
     RegisterSpan(leftover.get());

     // No need to coalesce as the previous span of |leftover| was just split
     // and the next span of |leftover| was not previously coalesced with
     // |span|.
     InsertIntoFreeList(leftover.Pass());

     serving->length_ = blocks;
     spans_[serving->start_ + blocks - 1] = serving.get();
(...skipping 68 matching lines...)
     }
   }
 }

 void DiscardableSharedMemoryHeap::OnMemoryDump(
     const base::DiscardableSharedMemory* shared_memory,
     size_t size,
     int32_t segment_id,
     base::trace_event::ProcessMemoryDump* pmd) {
   size_t allocated_objects_count = 0;
-  size_t allocated_objects_size_in_bytes = 0;
+  size_t allocated_objects_blocks = 0;

    reveman 2015/10/13 18:24:33  nit: /_objects_blocks/objects_size_in_blocks/

+  size_t locked_objects_blocks = 0;

    reveman 2015/10/13 18:24:33  nit: ditto

   size_t offset =
       reinterpret_cast<size_t>(shared_memory->memory()) / block_size_;
   size_t end = offset + size / block_size_;
   while (offset < end) {
     Span* span = spans_[offset];
     if (!IsInFreeList(span)) {
+      allocated_objects_blocks += span->length_;
+      locked_objects_blocks += span->is_locked_ ? span->length_ : 0;
       allocated_objects_count++;
-      allocated_objects_size_in_bytes += span->length_ * block_size_;
     }
     offset += span->length_;
   }
+  size_t allocated_objects_size_in_bytes =
+      allocated_objects_blocks * block_size_;
+  size_t locked_objects_size_in_bytes = locked_objects_blocks * block_size_;

   std::string segment_dump_name =
       base::StringPrintf("discardable/segment_%d", segment_id);
   base::trace_event::MemoryAllocatorDump* segment_dump =
       pmd->CreateAllocatorDump(segment_dump_name);
   segment_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
                           base::trace_event::MemoryAllocatorDump::kUnitsBytes,
-                          static_cast<uint64_t>(size));
+                          size);

   base::trace_event::MemoryAllocatorDump* obj_dump =
       pmd->CreateAllocatorDump(segment_dump_name + "/allocated_objects");
   obj_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameObjectCount,
                       base::trace_event::MemoryAllocatorDump::kUnitsObjects,
-                      static_cast<uint64_t>(allocated_objects_count));
+                      allocated_objects_count);
   obj_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
                       base::trace_event::MemoryAllocatorDump::kUnitsBytes,
-                      static_cast<uint64_t>(allocated_objects_size_in_bytes));
+                      allocated_objects_size_in_bytes);
+  obj_dump->AddScalar("locked_size",
+                      base::trace_event::MemoryAllocatorDump::kUnitsBytes,
+                      locked_objects_size_in_bytes);

   // Emit an ownership edge towards a global allocator dump node. This allows
   // to avoid double-counting segments when both browser and child process emit
   // them. In the special case of single-process-mode, this will be the only
   // dumper active and the single ownership edge will become a no-op in the UI.
   const uint64 tracing_process_id =
       base::trace_event::MemoryDumpManager::GetInstance()
           ->GetTracingProcessId();
   base::trace_event::MemoryAllocatorDumpGuid shared_segment_guid =
       GetSegmentGUIDForTracing(tracing_process_id, segment_id);
(...skipping 29 matching lines...)
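
A note on the accounting scheme in the hunk above: the patch sums span lengths in blocks inside the loop and converts to bytes only once, after the walk over the segment. Below is a minimal, self-contained sketch of that scheme; the Span and Totals types here are simplified stand-ins for illustration, not the Chromium classes:

    #include <cstddef>
    #include <vector>

    struct Span {
      size_t length_in_blocks = 0;
      bool is_locked = false;
      bool in_free_list = false;
    };

    struct Totals {
      size_t allocated_objects_count = 0;
      size_t allocated_size_in_bytes = 0;
      size_t locked_size_in_bytes = 0;
    };

    Totals ComputeTotals(const std::vector<Span>& spans, size_t block_size) {
      size_t allocated_blocks = 0;
      size_t locked_blocks = 0;
      Totals totals;
      for (const Span& span : spans) {
        // Spans on the free list are neither allocated nor locked.
        if (span.in_free_list)
          continue;
        totals.allocated_objects_count++;
        allocated_blocks += span.length_in_blocks;
        if (span.is_locked)
          locked_blocks += span.length_in_blocks;
      }
      // As in the patch: sum in blocks, multiply by the block size once.
      totals.allocated_size_in_bytes = allocated_blocks * block_size;
      totals.locked_size_in_bytes = locked_blocks * block_size;
      return totals;
    }

The new "locked_size" scalar reported on the allocated_objects dump is exactly this locked total in bytes, alongside the existing object count and allocated size.
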
   ScopedVector<ScopedMemorySegment>::const_iterator it =
       std::find_if(memory_segments_.begin(), memory_segments_.end(),
                    [span](const ScopedMemorySegment* segment) {
                      return segment->ContainsSpan(span);
                    });
   DCHECK(it != memory_segments_.end());
   return (*it)->CreateMemoryAllocatorDump(span, block_size_, name, pmd);
 }

 }  // namespace content
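
The ownership-edge comment in OnMemoryDump describes how double counting is avoided when both the browser and a child process dump the same segment: each process creates a shared global node keyed by the same GUID (derived from the tracing process id and the segment id) and points an ownership edge at it, so the trace viewer attributes the segment's bytes once. Below is a standalone sketch of that dedup idea; GlobalDumpGraph and EmitSegment are hypothetical names used only for illustration, while the real code uses ProcessMemoryDump::CreateSharedGlobalAllocatorDump() and AddOwnershipEdge():

    #include <cstdint>
    #include <map>
    #include <set>
    #include <string>
    #include <utility>

    // Models the trace viewer's view: one global node per GUID, no matter
    // how many processes emit a dump for the same segment.
    struct GlobalDumpGraph {
      std::map<uint64_t, uint64_t> global_node_bytes;              // guid -> size
      std::set<std::pair<std::string, uint64_t>> ownership_edges;  // dump -> guid

      void EmitSegment(const std::string& process_dump_name,
                       uint64_t shared_segment_guid,
                       uint64_t size_in_bytes) {
        // Creating the shared node is idempotent: a second process emitting
        // the same GUID reuses the existing node instead of adding its bytes
        // a second time.
        global_node_bytes.emplace(shared_segment_guid, size_in_bytes);
        ownership_edges.insert({process_dump_name, shared_segment_guid});
      }
    };

Usage under these assumptions: if the browser and a renderer both call EmitSegment with the same GUID, global_node_bytes holds a single entry for the segment while ownership_edges records one edge per process, which is also why the single-process case reduces to a harmless no-op edge in the UI.
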