| OLD | NEW |
| 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "gpu/command_buffer/client/mapped_memory.h" | 5 #include "gpu/command_buffer/client/mapped_memory.h" |
| 6 | 6 |
| 7 #include <stddef.h> |
| 8 #include <stdint.h> |
| 9 |
| 7 #include <algorithm> | 10 #include <algorithm> |
| 8 #include <functional> | 11 #include <functional> |
| 9 | 12 |
| 10 #include "base/atomic_sequence_num.h" | 13 #include "base/atomic_sequence_num.h" |
| 11 #include "base/logging.h" | 14 #include "base/logging.h" |
| 12 #include "base/strings/stringprintf.h" | 15 #include "base/strings/stringprintf.h" |
| 13 #include "base/thread_task_runner_handle.h" | 16 #include "base/thread_task_runner_handle.h" |
| 14 #include "base/trace_event/memory_dump_manager.h" | 17 #include "base/trace_event/memory_dump_manager.h" |
| 15 #include "base/trace_event/trace_event.h" | 18 #include "base/trace_event/trace_event.h" |
| 16 #include "gpu/command_buffer/client/cmd_buffer_helper.h" | 19 #include "gpu/command_buffer/client/cmd_buffer_helper.h" |
| 17 #include "gpu/command_buffer/common/buffer.h" | 20 #include "gpu/command_buffer/common/buffer.h" |
| 18 | 21 |
| 19 namespace gpu { | 22 namespace gpu { |
| 20 namespace { | 23 namespace { |
| 21 | 24 |
| 22 // Generates process-unique IDs to use for tracing a MappedMemoryManager's | 25 // Generates process-unique IDs to use for tracing a MappedMemoryManager's |
| 23 // chunks. | 26 // chunks. |
| 24 base::StaticAtomicSequenceNumber g_next_mapped_memory_manager_tracing_id; | 27 base::StaticAtomicSequenceNumber g_next_mapped_memory_manager_tracing_id; |
| 25 | 28 |
| 26 } // namespace | 29 } // namespace |
| 27 | 30 |
| 28 MemoryChunk::MemoryChunk(int32 shm_id, | 31 MemoryChunk::MemoryChunk(int32_t shm_id, |
| 29 scoped_refptr<gpu::Buffer> shm, | 32 scoped_refptr<gpu::Buffer> shm, |
| 30 CommandBufferHelper* helper) | 33 CommandBufferHelper* helper) |
| 31 : shm_id_(shm_id), | 34 : shm_id_(shm_id), |
| 32 shm_(shm), | 35 shm_(shm), |
| 33 allocator_(shm->size(), helper, shm->memory()) {} | 36 allocator_(shm->size(), helper, shm->memory()) {} |
| 34 | 37 |
| 35 MemoryChunk::~MemoryChunk() {} | 38 MemoryChunk::~MemoryChunk() {} |
| 36 | 39 |
| 37 MappedMemoryManager::MappedMemoryManager(CommandBufferHelper* helper, | 40 MappedMemoryManager::MappedMemoryManager(CommandBufferHelper* helper, |
| 38 size_t unused_memory_reclaim_limit) | 41 size_t unused_memory_reclaim_limit) |
| (...skipping 15 matching lines...) |
| 54 MappedMemoryManager::~MappedMemoryManager() { | 57 MappedMemoryManager::~MappedMemoryManager() { |
| 55 base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider( | 58 base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider( |
| 56 this); | 59 this); |
| 57 | 60 |
| 58 CommandBuffer* cmd_buf = helper_->command_buffer(); | 61 CommandBuffer* cmd_buf = helper_->command_buffer(); |
| 59 for (auto& chunk : chunks_) { | 62 for (auto& chunk : chunks_) { |
| 60 cmd_buf->DestroyTransferBuffer(chunk->shm_id()); | 63 cmd_buf->DestroyTransferBuffer(chunk->shm_id()); |
| 61 } | 64 } |
| 62 } | 65 } |
| 63 | 66 |
| 64 void* MappedMemoryManager::Alloc( | 67 void* MappedMemoryManager::Alloc(unsigned int size, |
| 65 unsigned int size, int32* shm_id, unsigned int* shm_offset) { | 68 int32_t* shm_id, |
| 69 unsigned int* shm_offset) { |
| 66 DCHECK(shm_id); | 70 DCHECK(shm_id); |
| 67 DCHECK(shm_offset); | 71 DCHECK(shm_offset); |
| 68 if (size <= allocated_memory_) { | 72 if (size <= allocated_memory_) { |
| 69 size_t total_bytes_in_use = 0; | 73 size_t total_bytes_in_use = 0; |
| 70 // See if any of the chunks can satisfy this request. | 74 // See if any of the chunks can satisfy this request. |
| 71 for (auto& chunk : chunks_) { | 75 for (auto& chunk : chunks_) { |
| 72 chunk->FreeUnused(); | 76 chunk->FreeUnused(); |
| 73 total_bytes_in_use += chunk->bytes_in_use(); | 77 total_bytes_in_use += chunk->bytes_in_use(); |
| 74 if (chunk->GetLargestFreeSizeWithoutWaiting() >= size) { | 78 if (chunk->GetLargestFreeSizeWithoutWaiting() >= size) { |
| 75 void* mem = chunk->Alloc(size); | 79 void* mem = chunk->Alloc(size); |
| (...skipping 25 matching lines...) |
| 101 if (max_allocated_bytes_ != kNoLimit && | 105 if (max_allocated_bytes_ != kNoLimit && |
| 102 (allocated_memory_ + size) > max_allocated_bytes_) { | 106 (allocated_memory_ + size) > max_allocated_bytes_) { |
| 103 return nullptr; | 107 return nullptr; |
| 104 } | 108 } |
| 105 | 109 |
| 106 // Make a new chunk to satisfy the request. | 110 // Make a new chunk to satisfy the request. |
| 107 CommandBuffer* cmd_buf = helper_->command_buffer(); | 111 CommandBuffer* cmd_buf = helper_->command_buffer(); |
| 108 unsigned int chunk_size = | 112 unsigned int chunk_size = |
| 109 ((size + chunk_size_multiple_ - 1) / chunk_size_multiple_) * | 113 ((size + chunk_size_multiple_ - 1) / chunk_size_multiple_) * |
| 110 chunk_size_multiple_; | 114 chunk_size_multiple_; |
| 111 int32 id = -1; | 115 int32_t id = -1; |
| 112 scoped_refptr<gpu::Buffer> shm = | 116 scoped_refptr<gpu::Buffer> shm = |
| 113 cmd_buf->CreateTransferBuffer(chunk_size, &id); | 117 cmd_buf->CreateTransferBuffer(chunk_size, &id); |
| 114 if (id < 0) | 118 if (id < 0) |
| 115 return NULL; | 119 return NULL; |
| 116 DCHECK(shm.get()); | 120 DCHECK(shm.get()); |
| 117 MemoryChunk* mc = new MemoryChunk(id, shm, helper_); | 121 MemoryChunk* mc = new MemoryChunk(id, shm, helper_); |
| 118 allocated_memory_ += mc->GetSize(); | 122 allocated_memory_ += mc->GetSize(); |
| 119 chunks_.push_back(make_scoped_ptr(mc)); | 123 chunks_.push_back(make_scoped_ptr(mc)); |
| 120 void* mem = mc->Alloc(size); | 124 void* mem = mc->Alloc(size); |
| 121 DCHECK(mem); | 125 DCHECK(mem); |
| 122 *shm_id = mc->shm_id(); | 126 *shm_id = mc->shm_id(); |
| 123 *shm_offset = mc->GetOffset(mem); | 127 *shm_offset = mc->GetOffset(mem); |
| 124 return mem; | 128 return mem; |
| 125 } | 129 } |
| 126 | 130 |
| 127 void MappedMemoryManager::Free(void* pointer) { | 131 void MappedMemoryManager::Free(void* pointer) { |
| 128 for (auto& chunk : chunks_) { | 132 for (auto& chunk : chunks_) { |
| 129 if (chunk->IsInChunk(pointer)) { | 133 if (chunk->IsInChunk(pointer)) { |
| 130 chunk->Free(pointer); | 134 chunk->Free(pointer); |
| 131 return; | 135 return; |
| 132 } | 136 } |
| 133 } | 137 } |
| 134 NOTREACHED(); | 138 NOTREACHED(); |
| 135 } | 139 } |
| 136 | 140 |
| 137 void MappedMemoryManager::FreePendingToken(void* pointer, int32 token) { | 141 void MappedMemoryManager::FreePendingToken(void* pointer, int32_t token) { |
| 138 for (auto& chunk : chunks_) { | 142 for (auto& chunk : chunks_) { |
| 139 if (chunk->IsInChunk(pointer)) { | 143 if (chunk->IsInChunk(pointer)) { |
| 140 chunk->FreePendingToken(pointer, token); | 144 chunk->FreePendingToken(pointer, token); |
| 141 return; | 145 return; |
| 142 } | 146 } |
| 143 } | 147 } |
| 144 NOTREACHED(); | 148 NOTREACHED(); |
| 145 } | 149 } |
| 146 | 150 |
| 147 void MappedMemoryManager::FreeUnused() { | 151 void MappedMemoryManager::FreeUnused() { |
| 148 CommandBuffer* cmd_buf = helper_->command_buffer(); | 152 CommandBuffer* cmd_buf = helper_->command_buffer(); |
| 149 MemoryChunkVector::iterator iter = chunks_.begin(); | 153 MemoryChunkVector::iterator iter = chunks_.begin(); |
| 150 while (iter != chunks_.end()) { | 154 while (iter != chunks_.end()) { |
| 151 MemoryChunk* chunk = (*iter).get(); | 155 MemoryChunk* chunk = (*iter).get(); |
| 152 chunk->FreeUnused(); | 156 chunk->FreeUnused(); |
| 153 if (!chunk->InUse()) { | 157 if (!chunk->InUse()) { |
| 154 cmd_buf->DestroyTransferBuffer(chunk->shm_id()); | 158 cmd_buf->DestroyTransferBuffer(chunk->shm_id()); |
| 155 allocated_memory_ -= chunk->GetSize(); | 159 allocated_memory_ -= chunk->GetSize(); |
| 156 iter = chunks_.erase(iter); | 160 iter = chunks_.erase(iter); |
| 157 } else { | 161 } else { |
| 158 ++iter; | 162 ++iter; |
| 159 } | 163 } |
| 160 } | 164 } |
| 161 } | 165 } |
| 162 | 166 |
| 163 bool MappedMemoryManager::OnMemoryDump( | 167 bool MappedMemoryManager::OnMemoryDump( |
| 164 const base::trace_event::MemoryDumpArgs& args, | 168 const base::trace_event::MemoryDumpArgs& args, |
| 165 base::trace_event::ProcessMemoryDump* pmd) { | 169 base::trace_event::ProcessMemoryDump* pmd) { |
| 166 const uint64 tracing_process_id = | 170 const uint64_t tracing_process_id = |
| 167 base::trace_event::MemoryDumpManager::GetInstance() | 171 base::trace_event::MemoryDumpManager::GetInstance() |
| 168 ->GetTracingProcessId(); | 172 ->GetTracingProcessId(); |
| 169 | 173 |
| 170 for (const auto& chunk : chunks_) { | 174 for (const auto& chunk : chunks_) { |
| 171 std::string dump_name = base::StringPrintf( | 175 std::string dump_name = base::StringPrintf( |
| 172 "gpu/mapped_memory/manager_%d/chunk_%d", tracing_id_, chunk->shm_id()); | 176 "gpu/mapped_memory/manager_%d/chunk_%d", tracing_id_, chunk->shm_id()); |
| 173 base::trace_event::MemoryAllocatorDump* dump = | 177 base::trace_event::MemoryAllocatorDump* dump = |
| 174 pmd->CreateAllocatorDump(dump_name); | 178 pmd->CreateAllocatorDump(dump_name); |
| 175 | 179 |
| 176 dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize, | 180 dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize, |
| (...skipping 29 matching lines...) |
| 206 void ScopedMappedMemoryPtr::Reset(uint32_t new_size) { | 210 void ScopedMappedMemoryPtr::Reset(uint32_t new_size) { |
| 207 Release(); | 211 Release(); |
| 208 | 212 |
| 209 if (new_size) { | 213 if (new_size) { |
| 210 buffer_ = mapped_memory_manager_->Alloc(new_size, &shm_id_, &shm_offset_); | 214 buffer_ = mapped_memory_manager_->Alloc(new_size, &shm_id_, &shm_offset_); |
| 211 size_ = buffer_ ? new_size : 0; | 215 size_ = buffer_ ? new_size : 0; |
| 212 } | 216 } |
| 213 } | 217 } |
| 214 | 218 |
| 215 } // namespace gpu | 219 } // namespace gpu |
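
For context on the interface touched by this change, below is a minimal, hypothetical sketch of how a client typically uses MappedMemoryManager as shown in the diff (Alloc, FreePendingToken, FreeUnused). It assumes an existing gpu::CommandBufferHelper* and that helper->InsertToken() is available as elsewhere in the command buffer client code; the function name and the literal sizes are placeholders, not part of this CL.

```cpp
#include <stdint.h>

#include "gpu/command_buffer/client/cmd_buffer_helper.h"
#include "gpu/command_buffer/client/mapped_memory.h"

// Hypothetical illustration only; not part of this change.
void ExampleUse(gpu::CommandBufferHelper* helper) {
  // kNoLimit: no cap on total allocated transfer memory.
  gpu::MappedMemoryManager manager(helper, gpu::MappedMemoryManager::kNoLimit);

  int32_t shm_id = -1;
  unsigned int shm_offset = 0;
  // Alloc() reuses a free region in an existing chunk when one is large
  // enough; otherwise it creates a new transfer buffer whose size is rounded
  // up to the chunk size multiple (e.g. a 20-byte request with a 16-byte
  // multiple produces a 32-byte chunk).
  void* mem = manager.Alloc(20, &shm_id, &shm_offset);
  if (!mem)
    return;

  // ... fill |mem| and issue a command that reads from (shm_id, shm_offset) ...

  // Defer the free until the service has consumed the data: the region is
  // only recycled once the token returned by InsertToken() has passed.
  manager.FreePendingToken(mem, helper->InsertToken());

  // Optionally release chunks whose pending frees have all completed.
  manager.FreeUnused();
}
```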