Chromium Code Reviews

Diff: content/common/discardable_shared_memory_heap.cc

Issue 1386333003: Revert of [tracing] Display the locked size of discardable memory segment. (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 5 years, 2 months ago
 // Copyright 2014 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "content/common/discardable_shared_memory_heap.h"

 #include <algorithm>

 #include "base/format_macros.h"
 #include "base/memory/discardable_shared_memory.h"
(...skipping 10 matching lines...)
 bool IsInFreeList(DiscardableSharedMemoryHeap::Span* span) {
   return span->previous() || span->next();
 }

 }  // namespace

 DiscardableSharedMemoryHeap::Span::Span(
     base::DiscardableSharedMemory* shared_memory,
     size_t start,
     size_t length)
-    : shared_memory_(shared_memory),
-      start_(start),
-      length_(length),
-      is_locked_(false) {}
+    : shared_memory_(shared_memory), start_(start), length_(length) {
+}

 DiscardableSharedMemoryHeap::Span::~Span() {
 }

 DiscardableSharedMemoryHeap::ScopedMemorySegment::ScopedMemorySegment(
     DiscardableSharedMemoryHeap* heap,
     scoped_ptr<base::DiscardableSharedMemory> shared_memory,
     size_t size,
     int32_t id,
     const base::Closure& deleted_callback)
(...skipping 228 matching lines...)
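For context on the hunk above: the revert drops the per-span lock bit. Below is a minimal sketch of the reverted Span state, inferred only from what this diff shows (is_locked_(false) here, set_is_locked() in Carve() below, and span->is_locked_ in OnMemoryDump()); the real declaration lives in discardable_shared_memory_heap.h and may differ.

// Sketch only; inferred from this diff, not copied from the actual header.
// Members are public here for brevity; the heap accesses them directly.
struct Span {
  Span(base::DiscardableSharedMemory* shared_memory, size_t start,
       size_t length)
      : shared_memory_(shared_memory),
        start_(start),
        length_(length),
        is_locked_(false) {}  // Reverted: new spans started out unlocked.

  void set_is_locked(bool is_locked) { is_locked_ = is_locked; }

  base::DiscardableSharedMemory* shared_memory_;  // Backing segment.
  size_t start_;    // First block index covered by this span.
  size_t length_;   // Span length, in blocks.
  bool is_locked_;  // Reverted per-span lock bit that fed "locked_size".
};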
 }

 scoped_ptr<DiscardableSharedMemoryHeap::Span>
 DiscardableSharedMemoryHeap::Carve(Span* span, size_t blocks) {
   scoped_ptr<Span> serving = RemoveFromFreeList(span);

   const int extra = serving->length_ - blocks;
   if (extra) {
     scoped_ptr<Span> leftover(
         new Span(serving->shared_memory_, serving->start_ + blocks, extra));
-    leftover->set_is_locked(false);
     DCHECK_IMPLIES(extra > 1, spans_.find(leftover->start_) == spans_.end());
     RegisterSpan(leftover.get());

     // No need to coalesce as the previous span of |leftover| was just split
     // and the next span of |leftover| was not previously coalesced with
     // |span|.
     InsertIntoFreeList(leftover.Pass());

     serving->length_ = blocks;
     spans_[serving->start_ + blocks - 1] = serving.get();
(...skipping 68 matching lines...)
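The hunk above serves an allocation by splitting a free span: |serving| keeps the first |blocks| blocks and |leftover| returns to the free list. A tiny self-contained illustration of that arithmetic (all values invented for the example):

#include <cstddef>
#include <cstdio>

int main() {
  // A free span covering blocks [100, 108), as Carve() might receive it.
  size_t start = 100, length = 8;
  size_t blocks = 3;               // The request being served.
  size_t extra = length - blocks;  // 5 blocks are left over.
  // |serving| keeps the prefix; |leftover| goes back on the free list.
  printf("serving:  [%zu, %zu)\n", start, start + blocks);
  printf("leftover: [%zu, %zu)\n", start + blocks, start + blocks + extra);
  return 0;
}

No coalescing is needed for the leftover because, as the comment in the code notes, its left neighbor was just split off from it and its right neighbor was never coalesced with the original span.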
     }
   }
 }

 void DiscardableSharedMemoryHeap::OnMemoryDump(
     const base::DiscardableSharedMemory* shared_memory,
     size_t size,
     int32_t segment_id,
     base::trace_event::ProcessMemoryDump* pmd) {
   size_t allocated_objects_count = 0;
-  size_t allocated_objects_blocks = 0;
-  size_t locked_objects_blocks = 0;
+  size_t allocated_objects_size_in_bytes = 0;
   size_t offset =
       reinterpret_cast<size_t>(shared_memory->memory()) / block_size_;
   size_t end = offset + size / block_size_;
   while (offset < end) {
     Span* span = spans_[offset];
     if (!IsInFreeList(span)) {
-      allocated_objects_blocks += span->length_;
-      locked_objects_blocks += span->is_locked_ ? span->length_ : 0;
       allocated_objects_count++;
+      allocated_objects_size_in_bytes += span->length_ * block_size_;
     }
     offset += span->length_;
   }
-  size_t allocated_objects_size_in_bytes =
-      allocated_objects_blocks * block_size_;
-  size_t locked_objects_size_in_bytes = locked_objects_blocks * block_size_;

   std::string segment_dump_name =
       base::StringPrintf("discardable/segment_%d", segment_id);
   base::trace_event::MemoryAllocatorDump* segment_dump =
       pmd->CreateAllocatorDump(segment_dump_name);
   segment_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
                           base::trace_event::MemoryAllocatorDump::kUnitsBytes,
-                          size);
+                          static_cast<uint64_t>(size));

   base::trace_event::MemoryAllocatorDump* obj_dump =
       pmd->CreateAllocatorDump(segment_dump_name + "/allocated_objects");
   obj_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameObjectCount,
                       base::trace_event::MemoryAllocatorDump::kUnitsObjects,
-                      allocated_objects_count);
+                      static_cast<uint64_t>(allocated_objects_count));
   obj_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
                       base::trace_event::MemoryAllocatorDump::kUnitsBytes,
-                      allocated_objects_size_in_bytes);
+                      static_cast<uint64_t>(allocated_objects_size_in_bytes));
-  obj_dump->AddScalar("locked_size",
-                      base::trace_event::MemoryAllocatorDump::kUnitsBytes,
-                      locked_objects_size_in_bytes);

   // Emit an ownership edge towards a global allocator dump node. This allows
   // to avoid double-counting segments when both browser and child process emit
   // them. In the special case of single-process-mode, this will be the only
   // dumper active and the single ownership edge will become a no-op in the UI.
   const uint64 tracing_process_id =
       base::trace_event::MemoryDumpManager::GetInstance()
           ->GetTracingProcessId();
   base::trace_event::MemoryAllocatorDumpGuid shared_segment_guid =
       GetSegmentGUIDForTracing(tracing_process_id, segment_id);
(...skipping 29 matching lines...)
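The 29 skipped lines presumably complete what the comment above describes. As a sketch only, assuming the base::trace_event::ProcessMemoryDump API of this period (CreateSharedGlobalAllocatorDump, AddOwnershipEdge); the elided code is not visible in this diff:

// Sketch, not the elided code: attach this process's segment dump to a
// cross-process global node so the segment is attributed once.
void EmitOwnershipEdgeSketch(
    base::trace_event::ProcessMemoryDump* pmd,
    base::trace_event::MemoryAllocatorDump* segment_dump,
    const base::trace_event::MemoryAllocatorDumpGuid& shared_segment_guid) {
  // Every process dumping this segment derives the same GUID, so each one
  // creates or reuses the same global node.
  pmd->CreateSharedGlobalAllocatorDump(shared_segment_guid);
  // The edge marks the per-process dump as an owner of the shared node;
  // the UI then counts the bytes once, not once per process.
  pmd->AddOwnershipEdge(segment_dump->guid(), shared_segment_guid);
}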
   ScopedVector<ScopedMemorySegment>::const_iterator it =
       std::find_if(memory_segments_.begin(), memory_segments_.end(),
                    [span](const ScopedMemorySegment* segment) {
                      return segment->ContainsSpan(span);
                    });
   DCHECK(it != memory_segments_.end());
   return (*it)->CreateMemoryAllocatorDump(span, block_size_, name, pmd);
 }

 }  // namespace content
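One detail worth calling out from OnMemoryDump() above: spans_ is keyed by block index, and a segment's base address divided by block_size_ yields the index of its first block. A small self-contained illustration (kBlockSize and the base address are invented for the example):

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  const size_t kBlockSize = 4096;           // Stand-in for block_size_.
  uintptr_t base = 0x20000000;              // Hypothetical segment base.
  size_t size = 16 * kBlockSize;            // A 16-block segment.
  size_t offset = base / kBlockSize;        // First block index; mirrors the
                                            // reinterpret_cast<size_t> line.
  size_t end = offset + size / kBlockSize;  // One past the last block.
  printf("segment occupies block indices [%zu, %zu)\n", offset, end);
  return 0;
}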
