| OLD | NEW | 
|---|---|
| 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be | 
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. | 
| 4 | 4 | 
| 5 #include "gpu/command_buffer/client/mapped_memory.h" | 5 #include "gpu/command_buffer/client/mapped_memory.h" | 
| 6 | 6 | 
| 7 #include <algorithm> | 7 #include <algorithm> | 
| 8 #include <functional> | 8 #include <functional> | 
| 9 | 9 | 
|  | 10 #include "base/debug/trace_event.h" | 
| 10 #include "gpu/command_buffer/client/cmd_buffer_helper.h" | 11 #include "gpu/command_buffer/client/cmd_buffer_helper.h" | 
| 11 | 12 | 
| 12 namespace gpu { | 13 namespace gpu { | 
| 13 | 14 | 
| 14 MemoryChunk::MemoryChunk( | 15 MemoryChunk::MemoryChunk( | 
| 15     int32 shm_id, gpu::Buffer shm, CommandBufferHelper* helper) | 16     int32 shm_id, gpu::Buffer shm, CommandBufferHelper* helper) | 
| 16     : shm_id_(shm_id), | 17     : shm_id_(shm_id), | 
| 17       shm_(shm), | 18       shm_(shm), | 
| 18       allocator_(shm.size, helper, shm.ptr) { | 19       allocator_(shm.size, helper, shm.ptr) { | 
| 19 } | 20 } | 
| 20 | 21 | 
| 21 MappedMemoryManager::MappedMemoryManager(CommandBufferHelper* helper) | 22 // |unused_memory_reclaim_limit|: When exceeded this causes pending memory to be | 
|  | 23 // reclaimed before allocating more memory. | 
|  | 24 MappedMemoryManager::MappedMemoryManager(CommandBufferHelper* helper, | 
|  | 25                                          size_t unused_memory_reclaim_limit) | 
| 22     : chunk_size_multiple_(1), | 26     : chunk_size_multiple_(1), | 
| 23       helper_(helper) { | 27       helper_(helper), | 
|  | 28       allocated_memory_(0), | 
|  | 29       max_free_bytes_(unused_memory_reclaim_limit) { | 
| 24 } | 30 } | 
| 25 | 31 | 
| 26 MappedMemoryManager::~MappedMemoryManager() { | 32 MappedMemoryManager::~MappedMemoryManager() { | 
| 27   CommandBuffer* cmd_buf = helper_->command_buffer(); | 33   CommandBuffer* cmd_buf = helper_->command_buffer(); | 
| 28   for (MemoryChunkVector::iterator iter = chunks_.begin(); | 34   for (MemoryChunkVector::iterator iter = chunks_.begin(); | 
| 29        iter != chunks_.end(); ++iter) { | 35        iter != chunks_.end(); ++iter) { | 
| 30     MemoryChunk* chunk = *iter; | 36     MemoryChunk* chunk = *iter; | 
| 31     cmd_buf->DestroyTransferBuffer(chunk->shm_id()); | 37     cmd_buf->DestroyTransferBuffer(chunk->shm_id()); | 
| 32   } | 38   } | 
| 33 } | 39 } | 
| 34 | 40 | 
| 35 void* MappedMemoryManager::Alloc( | 41 void* MappedMemoryManager::Alloc( | 
| 36     unsigned int size, int32* shm_id, unsigned int* shm_offset) { | 42     unsigned int size, int32* shm_id, unsigned int* shm_offset) { | 
| 37   GPU_DCHECK(shm_id); | 43   GPU_DCHECK(shm_id); | 
| 38   GPU_DCHECK(shm_offset); | 44   GPU_DCHECK(shm_offset); | 
| 39   // See if any of the chucks can satisfy this request. | 45   if (size <= allocated_memory_) { | 
| 40   for (size_t ii = 0; ii < chunks_.size(); ++ii) { | 46     size_t total_bytes_in_use = 0; | 
| 41     MemoryChunk* chunk = chunks_[ii]; | 47     // See if any of the chunks can satisfy this request. | 
| 42     chunk->FreeUnused(); | 48     for (size_t ii = 0; ii < chunks_.size(); ++ii) { | 
| 43     if (chunk->GetLargestFreeSizeWithoutWaiting() >= size) { | 49       MemoryChunk* chunk = chunks_[ii]; | 
| 44       void* mem = chunk->Alloc(size); | 50       chunk->FreeUnused(); | 
| 45       GPU_DCHECK(mem); | 51       total_bytes_in_use += chunk->bytes_in_use(); | 
| 46       *shm_id = chunk->shm_id(); | 52       if (chunk->GetLargestFreeSizeWithoutWaiting() >= size) { | 
| 47       *shm_offset = chunk->GetOffset(mem); | 53         void* mem = chunk->Alloc(size); | 
| 48       return mem; | 54         GPU_DCHECK(mem); | 
|  | 55         *shm_id = chunk->shm_id(); | 
|  | 56         *shm_offset = chunk->GetOffset(mem); | 
|  | 57         return mem; | 
|  | 58       } | 
|  | 59     } | 
|  | 60 | 
|  | 61     // If there is a memory limit being enforced and total free | 
|  | 62     // memory (allocated_memory_ - total_bytes_in_use) is larger than | 
|  | 63     // the limit try waiting. | 
|  | 64     if (max_free_bytes_ != kNoLimit && | 
|  | 65         (allocated_memory_ - total_bytes_in_use) >= max_free_bytes_) { | 
|  | 66       TRACE_EVENT0("gpu", "MappedMemoryManager::Alloc::wait"); | 
|  | 67       for (size_t ii = 0; ii < chunks_.size(); ++ii) { | 
|  | 68         MemoryChunk* chunk = chunks_[ii]; | 
|  | 69         if (chunk->GetLargestFreeSizeWithWaiting() >= size) { | 
|  | 70           void* mem = chunk->Alloc(size); | 
|  | 71           GPU_DCHECK(mem); | 
|  | 72           *shm_id = chunk->shm_id(); | 
|  | 73           *shm_offset = chunk->GetOffset(mem); | 
|  | 74           return mem; | 
|  | 75         } | 
|  | 76       } | 
| 49     } | 77     } | 
| 50   } | 78   } | 
| 51 | 79 | 
| 52   // Make a new chunk to satisfy the request. | 80   // Make a new chunk to satisfy the request. | 
| 53   CommandBuffer* cmd_buf = helper_->command_buffer(); | 81   CommandBuffer* cmd_buf = helper_->command_buffer(); | 
| 54   unsigned int chunk_size = | 82   unsigned int chunk_size = | 
| 55       ((size + chunk_size_multiple_ - 1) / chunk_size_multiple_) * | 83       ((size + chunk_size_multiple_ - 1) / chunk_size_multiple_) * | 
| 56       chunk_size_multiple_; | 84       chunk_size_multiple_; | 
| 57   int32 id = -1; | 85   int32 id = -1; | 
| 58   gpu::Buffer shm = cmd_buf->CreateTransferBuffer(chunk_size, &id); | 86   gpu::Buffer shm = cmd_buf->CreateTransferBuffer(chunk_size, &id); | 
| 59   if (id  < 0) | 87   if (id  < 0) | 
| 60     return NULL; | 88     return NULL; | 
| 61   MemoryChunk* mc = new MemoryChunk(id, shm, helper_); | 89   MemoryChunk* mc = new MemoryChunk(id, shm, helper_); | 
|  | 90   allocated_memory_ += mc->GetSize(); | 
| 62   chunks_.push_back(mc); | 91   chunks_.push_back(mc); | 
| 63   void* mem = mc->Alloc(size); | 92   void* mem = mc->Alloc(size); | 
| 64   GPU_DCHECK(mem); | 93   GPU_DCHECK(mem); | 
| 65   *shm_id = mc->shm_id(); | 94   *shm_id = mc->shm_id(); | 
| 66   *shm_offset = mc->GetOffset(mem); | 95   *shm_offset = mc->GetOffset(mem); | 
| 67   return mem; | 96   return mem; | 
| 68 } | 97 } | 
| 69 | 98 | 
| 70 void MappedMemoryManager::Free(void* pointer) { | 99 void MappedMemoryManager::Free(void* pointer) { | 
| 71   for (size_t ii = 0; ii < chunks_.size(); ++ii) { | 100   for (size_t ii = 0; ii < chunks_.size(); ++ii) { | 
| (...skipping 18 matching lines...) | 
| 90 } | 119 } | 
| 91 | 120 | 
| 92 void MappedMemoryManager::FreeUnused() { | 121 void MappedMemoryManager::FreeUnused() { | 
| 93   CommandBuffer* cmd_buf = helper_->command_buffer(); | 122   CommandBuffer* cmd_buf = helper_->command_buffer(); | 
| 94   MemoryChunkVector::iterator iter = chunks_.begin(); | 123   MemoryChunkVector::iterator iter = chunks_.begin(); | 
| 95   while (iter != chunks_.end()) { | 124   while (iter != chunks_.end()) { | 
| 96     MemoryChunk* chunk = *iter; | 125     MemoryChunk* chunk = *iter; | 
| 97     chunk->FreeUnused(); | 126     chunk->FreeUnused(); | 
| 98     if (!chunk->InUse()) { | 127     if (!chunk->InUse()) { | 
| 99       cmd_buf->DestroyTransferBuffer(chunk->shm_id()); | 128       cmd_buf->DestroyTransferBuffer(chunk->shm_id()); | 
|  | 129       allocated_memory_ -= chunk->GetSize(); | 
| 100       iter = chunks_.erase(iter); | 130       iter = chunks_.erase(iter); | 
| 101     } else { | 131     } else { | 
| 102       ++iter; | 132       ++iter; | 
| 103     } | 133     } | 
| 104   } | 134   } | 
| 105 } | 135 } | 
| 106 | 136 | 
| 107 }  // namespace gpu | 137 }  // namespace gpu | 
| 108 |  | 
| 109 |  | 
| 110 |  | 
| OLD | NEW | 
|---|---|