Index: content/common/discardable_shared_memory_heap.cc
diff --git a/content/common/discardable_shared_memory_heap.cc b/content/common/discardable_shared_memory_heap.cc
index ccf2729c557f660e3b675def1393449be60564dd..45af67c9bcfbe0bb4edf63b0cc59ae6271083649 100644
--- a/content/common/discardable_shared_memory_heap.cc
+++ b/content/common/discardable_shared_memory_heap.cc
@@ -27,13 +27,39 @@ bool IsInFreeList(DiscardableSharedMemoryHeap::Span* span) {
 DiscardableSharedMemoryHeap::Span::Span(
     base::DiscardableSharedMemory* shared_memory,
     size_t start,
-    size_t length)
-    : shared_memory_(shared_memory), start_(start), length_(length) {
-}
+    size_t length,
+    bool is_locked)
+    : shared_memory_(shared_memory),
+      start_(start),
+      length_(length),
+      is_locked_(is_locked) {}
 
 DiscardableSharedMemoryHeap::Span::~Span() {
 }
 
+base::DiscardableSharedMemory::LockResult
+DiscardableSharedMemoryHeap::Span::Lock(size_t page_size) {
+  const size_t offset =
+      start_ * page_size - reinterpret_cast<size_t>(shared_memory_->memory());
+  const size_t length = length_ * page_size;
+  base::DiscardableSharedMemory::LockResult result =
+      shared_memory_->Lock(offset, length);
+  is_locked_ = result == base::DiscardableSharedMemory::SUCCESS;
+  return result;
+}
+
+void DiscardableSharedMemoryHeap::Span::Unlock(size_t page_size) {
+  const size_t offset =
+      start_ * page_size - reinterpret_cast<size_t>(shared_memory_->memory());
+  const size_t length = length_ * page_size;
+  shared_memory_->Unlock(offset, length);
+  is_locked_ = false;
+}
+
+bool DiscardableSharedMemoryHeap::Span::IsMemoryResident() const {
+  return shared_memory_->IsMemoryResident();
+}
+
 DiscardableSharedMemoryHeap::ScopedMemorySegment::ScopedMemorySegment(
     DiscardableSharedMemoryHeap* heap,
     scoped_ptr<base::DiscardableSharedMemory> shared_memory,
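
The new Span::Lock() and Span::Unlock() convert the span's block-based
coordinates (start_, length_) into a byte offset and length relative to the
start of the backing segment, which is the form base::DiscardableSharedMemory
expects. A minimal sketch of how a caller might drive them, assuming the
heap's block size equals the system page size; LockAndTouch() is a
hypothetical helper, not part of this patch:

    // Hypothetical caller: pins a span's pages for the duration of an
    // access, then makes them purgeable again. Anything other than SUCCESS
    // (PURGED: contents were discarded while unlocked; FAILED: pages could
    // not be pinned) is treated as a miss here.
    bool LockAndTouch(DiscardableSharedMemoryHeap::Span* span) {
      const size_t page_size = base::GetPageSize();
      if (span->Lock(page_size) != base::DiscardableSharedMemory::SUCCESS)
        return false;
      // ... read or write the span's memory here ...
      span->Unlock(page_size);
      return true;
    }
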
@@ -62,7 +88,7 @@ bool DiscardableSharedMemoryHeap::ScopedMemorySegment::IsResident() const {
 
 bool DiscardableSharedMemoryHeap::ScopedMemorySegment::ContainsSpan(
     Span* span) const {
-  return shared_memory_ == span->shared_memory();
+  return shared_memory_ == span->shared_memory_;
 }
 
 base::trace_event::MemoryAllocatorDump*
@@ -71,7 +97,7 @@ DiscardableSharedMemoryHeap::ScopedMemorySegment::CreateMemoryAllocatorDump(
     size_t block_size,
     const char* name,
     base::trace_event::ProcessMemoryDump* pmd) const {
-  DCHECK_EQ(shared_memory_, span->shared_memory());
+  DCHECK_EQ(shared_memory_, span->shared_memory_);
   base::trace_event::MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(name);
   dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
                   base::trace_event::MemoryAllocatorDump::kUnitsBytes,
@@ -119,7 +145,7 @@ scoped_ptr<DiscardableSharedMemoryHeap::Span> DiscardableSharedMemoryHeap::Grow(
   scoped_ptr<Span> span(
       new Span(shared_memory.get(),
               reinterpret_cast<size_t>(shared_memory->memory()) / block_size_,
-               size / block_size_));
+               size / block_size_, true /* is_locked */));
   DCHECK(spans_.find(span->start_) == spans_.end());
   DCHECK(spans_.find(span->start_ + span->length_ - 1) == spans_.end());
   RegisterSpan(span.get());
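
Grow() creates the span for a brand-new segment with is_locked == true
because a base::DiscardableSharedMemory that has just been created and mapped
starts out locked; the flag simply mirrors the state of the backing memory.
An illustrative lifecycle (the Grow() signature is abbreviated here and the
surrounding calls are assumptions, not part of this patch):

    // New segments arrive locked; a span only becomes unlocked when the
    // client explicitly releases it.
    scoped_ptr<base::DiscardableSharedMemory> memory(
        new base::DiscardableSharedMemory);
    CHECK(memory->CreateAndMap(size));  // Mapped memory starts out locked.
    scoped_ptr<Span> span = heap->Grow(memory.Pass(), size);  // is_locked_: true
    span->Unlock(base::GetPageSize());  // Purgeable from here on.
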
@@ -172,8 +198,9 @@ DiscardableSharedMemoryHeap::Split(Span* span, size_t blocks) {
   DCHECK(blocks);
   DCHECK_LT(blocks, span->length_);
 
-  scoped_ptr<Span> leftover(new Span(
-      span->shared_memory_, span->start_ + blocks, span->length_ - blocks));
+  scoped_ptr<Span> leftover(
+      new Span(span->shared_memory_, span->start_ + blocks,
+               span->length_ - blocks, true /* is_locked */));
   DCHECK_IMPLIES(leftover->length_ > 1,
                  spans_.find(leftover->start_) == spans_.end());
   RegisterSpan(leftover.get());
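
Split() cuts the tail off an allocated span whose memory is still locked, so
the leftover is constructed with true /* is_locked */; both pieces keep
referencing the same shared memory segment. The block arithmetic, as a
standalone sketch (plain C++, no Chromium dependencies, made-up numbers):

    #include <cstddef>
    #include <cstdio>

    int main() {
      // A span covers blocks [start, start + length). Split(span, blocks)
      // truncates it to |blocks| blocks and returns the tail as a new span.
      const size_t start = 100, length = 8, blocks = 3;
      std::printf("span:     [%zu, %zu), still locked\n", start, start + blocks);
      std::printf("leftover: [%zu, %zu), still locked\n", start + blocks,
                  start + length);
      return 0;
    }
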
@@ -276,8 +303,9 @@ DiscardableSharedMemoryHeap::Carve(Span* span, size_t blocks) {
 
   const int extra = serving->length_ - blocks;
   if (extra) {
-    scoped_ptr<Span> leftover(
-        new Span(serving->shared_memory_, serving->start_ + blocks, extra));
+    scoped_ptr<Span> leftover(new Span(serving->shared_memory_,
+                                       serving->start_ + blocks, extra,
+                                       false /* is_locked */));
     DCHECK_IMPLIES(extra > 1, spans_.find(leftover->start_) == spans_.end());
     RegisterSpan(leftover.get());
 
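
Carve(), by contrast, serves an allocation from a span sitting in the free
list, whose memory has already been unlocked (and may since have been
purged); the unused remainder goes straight back onto the free list, so it is
constructed with false /* is_locked */. The rule shared by this hunk and the
Split() hunk above, as a self-contained sketch (plain C++; the names are
illustrative, not from this patch):

    #include <cassert>

    // A leftover span inherits the lock state of the span it was cut from:
    // Split() cuts a locked allocation, Carve() cuts an unlocked free span.
    enum class LeftoverOrigin { kSplitLockedSpan, kCarvedFromFreeList };

    bool LeftoverIsLocked(LeftoverOrigin origin) {
      return origin == LeftoverOrigin::kSplitLockedSpan;
    }

    int main() {
      assert(LeftoverIsLocked(LeftoverOrigin::kSplitLockedSpan));
      assert(!LeftoverIsLocked(LeftoverOrigin::kCarvedFromFreeList));
      return 0;
    }
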
@@ -367,6 +395,7 @@ void DiscardableSharedMemoryHeap::OnMemoryDump(
     base::trace_event::ProcessMemoryDump* pmd) {
   size_t allocated_objects_count = 0;
   size_t allocated_objects_size_in_bytes = 0;
+  size_t locked_size_in_bytes = 0;
   size_t offset =
       reinterpret_cast<size_t>(shared_memory->memory()) / block_size_;
   size_t end = offset + size / block_size_;
@@ -374,7 +403,9 @@ void DiscardableSharedMemoryHeap::OnMemoryDump(
     Span* span = spans_[offset];
     if (!IsInFreeList(span)) {
       allocated_objects_count++;
-      allocated_objects_size_in_bytes += span->length_ * block_size_;
+      const size_t span_size = span->length_ * block_size_;
+      allocated_objects_size_in_bytes += span_size;
+      locked_size_in_bytes += span->is_locked_ ? span_size : 0;
     }
     offset += span->length_;
   }
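
locked_size_in_bytes is accumulated with += alongside
allocated_objects_size_in_bytes; a plain assignment here would record only
the last span visited instead of the running total. The same per-segment
accounting as a standalone sketch (plain C++; SpanInfo is an illustrative
stand-in for the real span map):

    #include <cstddef>
    #include <vector>

    struct SpanInfo {
      size_t length_in_blocks;
      bool in_free_list;
      bool is_locked;
    };

    // Mirrors the walk in OnMemoryDump(): free spans contribute nothing;
    // locked spans contribute to both totals.
    void AccountSpans(const std::vector<SpanInfo>& spans, size_t block_size,
                      size_t* allocated_bytes, size_t* locked_bytes) {
      *allocated_bytes = 0;
      *locked_bytes = 0;
      for (const SpanInfo& span : spans) {
        if (span.in_free_list)
          continue;
        const size_t span_size = span.length_in_blocks * block_size;
        *allocated_bytes += span_size;
        if (span.is_locked)
          *locked_bytes += span_size;  // Accumulate; never overwrite.
      }
    }
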
@@ -395,6 +426,9 @@ void DiscardableSharedMemoryHeap::OnMemoryDump(
   obj_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
                       base::trace_event::MemoryAllocatorDump::kUnitsBytes,
                       static_cast<uint64_t>(allocated_objects_size_in_bytes));
+ obj_dump->AddScalar("locked_size", |
+ base::trace_event::MemoryAllocatorDump::kUnitsBytes, |
+ locked_size_in_bytes); |
 
   // Emit an ownership edge towards a global allocator dump node. This allows
   // to avoid double-counting segments when both browser and child process emit