Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "gpu/command_buffer/client/mapped_memory.h" | 5 #include "gpu/command_buffer/client/mapped_memory.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <functional> | 8 #include <functional> |
| 9 | 9 |
| | 10 #if defined(OS_ANDROID) |
| | 11 #include "base/android/sys_utils.h" |
| | 12 #endif |
| | 13 |
| 10 #include "gpu/command_buffer/client/cmd_buffer_helper.h" | 14 #include "gpu/command_buffer/client/cmd_buffer_helper.h" |
| 11 | 15 |
| 12 namespace gpu { | 16 namespace gpu { |
| 13 | 17 |
| 14 MemoryChunk::MemoryChunk( | 18 MemoryChunk::MemoryChunk( |
| 15 int32 shm_id, gpu::Buffer shm, CommandBufferHelper* helper) | 19 int32 shm_id, gpu::Buffer shm, CommandBufferHelper* helper) |
| 16 : shm_id_(shm_id), | 20 : shm_id_(shm_id), |
| 17 shm_(shm), | 21 shm_(shm), |
| 18 allocator_(shm.size, helper, shm.ptr) { | 22 allocator_(shm.size, helper, shm.ptr) { |
| 19 } | 23 } |
| 20 | 24 |
| 21 MappedMemoryManager::MappedMemoryManager(CommandBufferHelper* helper) | 25 MappedMemoryManager::MappedMemoryManager(CommandBufferHelper* helper) |
| 22 : chunk_size_multiple_(1), | 26 : chunk_size_multiple_(1), |
| 23 helper_(helper) { | 27 helper_(helper), |
| | 28 allocated_memory_(0), |
| | 29 memory_limit_(kNoLimit) { |
| | 30 #if defined(OS_ANDROID) |
|
sievers 2013/08/14 02:05:52
can we avoid ifdefs? Can't we just pass in the limit?
| | 31 if (base::android::SysUtils::IsLowEndDevice()) { |
| | 32 memory_limit_ = kAndroidLowEndLimit; |
| | 33 } |
| | 34 #endif |
| 24 } | 35 } |
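
As an aside, a minimal sketch of the alternative sievers suggests above: the platform-aware caller computes the limit and passes it in, so this file needs no ifdefs. The two-argument constructor and the call site are hypothetical; `kNoLimit`, `kAndroidLowEndLimit`, and `SysUtils::IsLowEndDevice()` are taken from the patch, and the `size_t` type for the limit is an assumption.

```cpp
// Hypothetical variant: the embedder passes the limit in, keeping
// mapped_memory.cc free of OS_ANDROID ifdefs.
MappedMemoryManager::MappedMemoryManager(CommandBufferHelper* helper,
                                         size_t memory_limit)
    : chunk_size_multiple_(1),
      helper_(helper),
      allocated_memory_(0),
      memory_limit_(memory_limit) {
}

// Hypothetical call site in platform-aware setup code:
//   size_t limit = MappedMemoryManager::kNoLimit;
// #if defined(OS_ANDROID)
//   if (base::android::SysUtils::IsLowEndDevice())
//     limit = MappedMemoryManager::kAndroidLowEndLimit;
// #endif
//   mapped_memory_.reset(new MappedMemoryManager(helper_, limit));
```
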
| 25 | 36 |
| 26 MappedMemoryManager::~MappedMemoryManager() { | 37 MappedMemoryManager::~MappedMemoryManager() { |
| 27 CommandBuffer* cmd_buf = helper_->command_buffer(); | 38 CommandBuffer* cmd_buf = helper_->command_buffer(); |
| 28 for (MemoryChunkVector::iterator iter = chunks_.begin(); | 39 for (MemoryChunkVector::iterator iter = chunks_.begin(); |
| 29 iter != chunks_.end(); ++iter) { | 40 iter != chunks_.end(); ++iter) { |
| 30 MemoryChunk* chunk = *iter; | 41 MemoryChunk* chunk = *iter; |
| 31 cmd_buf->DestroyTransferBuffer(chunk->shm_id()); | 42 cmd_buf->DestroyTransferBuffer(chunk->shm_id()); |
| 32 } | 43 } |
| 33 } | 44 } |
| 34 | 45 |
| 35 void* MappedMemoryManager::Alloc( | 46 void* MappedMemoryManager::Alloc( |
| 36 unsigned int size, int32* shm_id, unsigned int* shm_offset) { | 47 unsigned int size, int32* shm_id, unsigned int* shm_offset) { |
| 37 GPU_DCHECK(shm_id); | 48 GPU_DCHECK(shm_id); |
| 38 GPU_DCHECK(shm_offset); | 49 GPU_DCHECK(shm_offset); |
| 39 // See if any of the chucks can satisfy this request. | 50 // See if any of the chunks can satisfy this request. |
| 40 for (size_t ii = 0; ii < chunks_.size(); ++ii) { | 51 for (size_t ii = 0; ii < chunks_.size(); ++ii) { |
| 41 MemoryChunk* chunk = chunks_[ii]; | 52 MemoryChunk* chunk = chunks_[ii]; |
| 42 chunk->FreeUnused(); | 53 chunk->FreeUnused(); |
| 43 if (chunk->GetLargestFreeSizeWithoutWaiting() >= size) { | 54 if (chunk->GetLargestFreeSizeWithoutWaiting() >= size) { |
| 44 void* mem = chunk->Alloc(size); | 55 void* mem = chunk->Alloc(size); |
| 45 GPU_DCHECK(mem); | 56 GPU_DCHECK(mem); |
| 46 *shm_id = chunk->shm_id(); | 57 *shm_id = chunk->shm_id(); |
| 47 *shm_offset = chunk->GetOffset(mem); | 58 *shm_offset = chunk->GetOffset(mem); |
| 48 return mem; | 59 return mem; |
| 49 } | 60 } |
| 50 } | 61 } |
| 51 | 62 |
| | 63 // If there is a memory limit being enforced and allocation of |
| | 64 // a new chunk would exceed the limit, try again with waiting. |
| | 65 if (memory_limit_ != kNoLimit && (allocated_memory_ + size) > memory_limit_) { |
| | 66 for (size_t ii = 0; ii < chunks_.size(); ++ii) { |
| | 67 MemoryChunk* chunk = chunks_[ii]; |
| | 68 chunk->FreeUnused(); |
| | 69 if (chunk->GetLargestFreeSizeWithWaiting() >= size) { |
| | 70 void* mem = chunk->Alloc(size); |
| | 71 GPU_DCHECK(mem); |
| | 72 *shm_id = chunk->shm_id(); |
| | 73 *shm_offset = chunk->GetOffset(mem); |
| | 74 return mem; |
| | 75 } |
| | 76 } |
| | 77 } |
| | 78 |
| 78 | |
| 52 // Make a new chunk to satisfy the request. | 79 // Make a new chunk to satisfy the request. |
| 53 CommandBuffer* cmd_buf = helper_->command_buffer(); | 80 CommandBuffer* cmd_buf = helper_->command_buffer(); |
| 54 unsigned int chunk_size = | 81 unsigned int chunk_size = |
| 55 ((size + chunk_size_multiple_ - 1) / chunk_size_multiple_) * | 82 ((size + chunk_size_multiple_ - 1) / chunk_size_multiple_) * |
| 56 chunk_size_multiple_; | 83 chunk_size_multiple_; |
| 57 int32 id = -1; | 84 int32 id = -1; |
| 58 gpu::Buffer shm = cmd_buf->CreateTransferBuffer(chunk_size, &id); | 85 gpu::Buffer shm = cmd_buf->CreateTransferBuffer(chunk_size, &id); |
| 59 if (id < 0) | 86 if (id < 0) |
| 60 return NULL; | 87 return NULL; |
| 61 MemoryChunk* mc = new MemoryChunk(id, shm, helper_); | 88 MemoryChunk* mc = new MemoryChunk(id, shm, helper_); |
| | 89 allocated_memory_ += mc->GetSize(); |
| 62 chunks_.push_back(mc); | 90 chunks_.push_back(mc); |
| 63 void* mem = mc->Alloc(size); | 91 void* mem = mc->Alloc(size); |
| 64 GPU_DCHECK(mem); | 92 GPU_DCHECK(mem); |
| 65 *shm_id = mc->shm_id(); | 93 *shm_id = mc->shm_id(); |
| 66 *shm_offset = mc->GetOffset(mem); | 94 *shm_offset = mc->GetOffset(mem); |
| 67 return mem; | 95 return mem; |
| 68 } | 96 } |
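
The `chunk_size` computation above rounds the request up to the next multiple of `chunk_size_multiple_` using integer arithmetic. A self-contained sketch with concrete numbers; the `RoundUp` helper name is ours, not the patch's:

```cpp
#include <cassert>

// Round 'size' up to the next multiple of 'multiple' (multiple > 0),
// using the same integer trick as in MappedMemoryManager::Alloc.
unsigned int RoundUp(unsigned int size, unsigned int multiple) {
  return ((size + multiple - 1) / multiple) * multiple;
}

int main() {
  assert(RoundUp(3000, 1024) == 3072);  // 3000 bytes -> three 1 KiB blocks.
  assert(RoundUp(4096, 1024) == 4096);  // Already aligned: unchanged.
  assert(RoundUp(1, 1) == 1);           // Multiple of 1: no rounding.
  return 0;
}
```
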
| 69 | 97 |
| 70 void MappedMemoryManager::Free(void* pointer) { | 98 void MappedMemoryManager::Free(void* pointer) { |
| 71 for (size_t ii = 0; ii < chunks_.size(); ++ii) { | 99 for (size_t ii = 0; ii < chunks_.size(); ++ii) { |
| (...skipping 18 matching lines...) | |
| 90 } | 118 } |
| 91 | 119 |
| 92 void MappedMemoryManager::FreeUnused() { | 120 void MappedMemoryManager::FreeUnused() { |
| 93 CommandBuffer* cmd_buf = helper_->command_buffer(); | 121 CommandBuffer* cmd_buf = helper_->command_buffer(); |
| 94 MemoryChunkVector::iterator iter = chunks_.begin(); | 122 MemoryChunkVector::iterator iter = chunks_.begin(); |
| 95 while (iter != chunks_.end()) { | 123 while (iter != chunks_.end()) { |
| 96 MemoryChunk* chunk = *iter; | 124 MemoryChunk* chunk = *iter; |
| 97 chunk->FreeUnused(); | 125 chunk->FreeUnused(); |
| 98 if (!chunk->InUse()) { | 126 if (!chunk->InUse()) { |
| 99 cmd_buf->DestroyTransferBuffer(chunk->shm_id()); | 127 cmd_buf->DestroyTransferBuffer(chunk->shm_id()); |
| | 128 allocated_memory_ -= chunk->GetSize(); |
| 100 iter = chunks_.erase(iter); | 129 iter = chunks_.erase(iter); |
| 101 } else { | 130 } else { |
| 102 ++iter; | 131 ++iter; |
| 103 } | 132 } |
| 104 } | 133 } |
| 105 } | 134 } |
| 106 | 135 |
| 107 } // namespace gpu | 136 } // namespace gpu |
| 108 | 137 |
| 109 | 138 |
| 110 | 139 |
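
For reference, a hedged sketch of how a client drives this manager, using only the calls shown in this file; the `helper` argument is assumed to be set up elsewhere during context initialization:

```cpp
#include "gpu/command_buffer/client/mapped_memory.h"

// Sketch only: assumes 'helper' is an already-initialized
// CommandBufferHelper* (not shown here).
void Example(gpu::CommandBufferHelper* helper) {
  gpu::MappedMemoryManager manager(helper);

  int32 shm_id = -1;
  unsigned int shm_offset = 0;
  void* mem = manager.Alloc(2048, &shm_id, &shm_offset);
  if (mem) {
    // Write into 'mem'; commands reference it via (shm_id, shm_offset).
    manager.Free(mem);   // Return the block to its chunk's allocator.
  }
  manager.FreeUnused();  // Destroy idle chunks and subtract their size
                         // from allocated_memory_.
}
```
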