Chromium Code Reviews

Unified Diff: gpu/command_buffer/client/mapped_memory.cc

Issue 23130004: Enforce a memory limit on MappedMemoryManager (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Remove unused enum Created 7 years, 4 months ago
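
Before the diff itself, here is a minimal, self-contained sketch of the allocation strategy this change introduces. It is not the real gpu::MappedMemoryManager: Chunk, LimitedPool, kChunkSize, FreePendingInLastChunk and the numeric values are illustrative stand-ins. What it mirrors is the ordering of the three steps in Alloc() in the patch below: reuse an existing chunk without waiting, then, only once the configured limit has been reached, retry while waiting for pending frees, and only as a last resort create a new chunk and grow the allocated total.

// Simplified standalone model of the memory-limit strategy (not the real
// gpu:: classes).  Compile with a C++14 (or later) compiler.
#include <cstddef>
#include <iostream>
#include <memory>
#include <vector>

// One fixed-size chunk.  Frees are two-phase: a block is first "pending"
// (the GPU may still read it) and only becomes reusable after we wait,
// which is what GetLargestFreeSizeWithWaiting() models in the patch.
class Chunk {
 public:
  explicit Chunk(size_t size) : size_(size), used_(0), pending_(0) {}
  size_t size() const { return size_; }
  bool Alloc(size_t bytes) {             // without waiting
    if (bytes > size_ - used_ - pending_) return false;
    used_ += bytes;
    return true;
  }
  bool AllocWithWaiting(size_t bytes) {  // reclaim pending frees first
    if (bytes > size_ - used_) return false;
    pending_ = 0;
    used_ += bytes;
    return true;
  }
  void FreePending(size_t bytes) { used_ -= bytes; pending_ += bytes; }

 private:
  size_t size_;
  size_t used_;
  size_t pending_;
};

// Pool mirroring the patch: try existing chunks cheaply, then (only once the
// limit is reached) try again with waiting, and only then grow.
class LimitedPool {
 public:
  static constexpr size_t kNoLimit = 0;
  static constexpr size_t kChunkSize = 1024;
  explicit LimitedPool(size_t memory_limit)
      : allocated_memory_(0), memory_limit_(memory_limit) {}

  bool Alloc(size_t bytes) {
    // 1. Try existing chunks without waiting.
    for (auto& chunk : chunks_)
      if (chunk->Alloc(bytes)) return true;
    // 2. At or over the limit: retry, waiting for pending frees.
    if (memory_limit_ != kNoLimit && allocated_memory_ >= memory_limit_) {
      for (auto& chunk : chunks_)
        if (chunk->AllocWithWaiting(bytes)) return true;
    }
    // 3. Otherwise grow and account for the new chunk's size.
    const size_t chunk_bytes = bytes > kChunkSize ? bytes : kChunkSize;
    chunks_.push_back(std::make_unique<Chunk>(chunk_bytes));
    allocated_memory_ += chunks_.back()->size();
    return chunks_.back()->Alloc(bytes);
  }
  void FreePendingInLastChunk(size_t bytes) { chunks_.back()->FreePending(bytes); }
  size_t allocated_memory() const { return allocated_memory_; }

 private:
  std::vector<std::unique_ptr<Chunk>> chunks_;
  size_t allocated_memory_;
  size_t memory_limit_;
};

int main() {
  LimitedPool pool(/*memory_limit=*/2048);
  pool.Alloc(1024);                   // first chunk
  pool.Alloc(1024);                   // second chunk; the 2048-byte limit is reached
  pool.FreePendingInLastChunk(1024);  // freed, but the "GPU" may still use it
  pool.Alloc(512);                    // at the limit: waits and reuses chunk 2
  std::cout << pool.allocated_memory() << "\n";  // still 2048, no new chunk
  return 0;
}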
 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "gpu/command_buffer/client/mapped_memory.h"

 #include <algorithm>
 #include <functional>

 #include "gpu/command_buffer/client/cmd_buffer_helper.h"

 namespace gpu {

 MemoryChunk::MemoryChunk(
     int32 shm_id, gpu::Buffer shm, CommandBufferHelper* helper)
     : shm_id_(shm_id),
       shm_(shm),
       allocator_(shm.size, helper, shm.ptr) {
 }

 MappedMemoryManager::MappedMemoryManager(CommandBufferHelper* helper)
     : chunk_size_multiple_(1),
-      helper_(helper) {
+      helper_(helper),
+      allocated_memory_(0),
+      memory_limit_(kNoLimit) {
+}
+
+MappedMemoryManager::MappedMemoryManager(CommandBufferHelper* helper,
+                                         size_t memory_limit)
+    : chunk_size_multiple_(1),
+      helper_(helper),
+      allocated_memory_(0),
+      memory_limit_(memory_limit) {
 }

 MappedMemoryManager::~MappedMemoryManager() {
   CommandBuffer* cmd_buf = helper_->command_buffer();
   for (MemoryChunkVector::iterator iter = chunks_.begin();
        iter != chunks_.end(); ++iter) {
     MemoryChunk* chunk = *iter;
     cmd_buf->DestroyTransferBuffer(chunk->shm_id());
   }
 }

 void* MappedMemoryManager::Alloc(
     unsigned int size, int32* shm_id, unsigned int* shm_offset) {
   GPU_DCHECK(shm_id);
   GPU_DCHECK(shm_offset);
-  // See if any of the chucks can satisfy this request.
+  // See if any of the chunks can satisfy this request.
   for (size_t ii = 0; ii < chunks_.size(); ++ii) {
     MemoryChunk* chunk = chunks_[ii];
     chunk->FreeUnused();
     if (chunk->GetLargestFreeSizeWithoutWaiting() >= size) {
       void* mem = chunk->Alloc(size);
       GPU_DCHECK(mem);
       *shm_id = chunk->shm_id();
       *shm_offset = chunk->GetOffset(mem);
       return mem;
     }
   }

+  // If there is a memory limit being enforced and the allocated memory
+  // is above the limit, try existing chunks again with waiting.
+  if (memory_limit_ != kNoLimit && allocated_memory_ >= memory_limit_) {
+    for (size_t ii = 0; ii < chunks_.size(); ++ii) {
+      MemoryChunk* chunk = chunks_[ii];
+      chunk->FreeUnused();
+      if (chunk->GetLargestFreeSizeWithWaiting() >= size) {
+        void* mem = chunk->Alloc(size);
+        GPU_DCHECK(mem);
+        *shm_id = chunk->shm_id();
+        *shm_offset = chunk->GetOffset(mem);
+        return mem;
+      }
+    }
piman 2013/08/15 02:49:03: So, if we get here, that means that we're not goin
kaanb 2013/08/16 22:50:44: Done.
+  }
+
   // Make a new chunk to satisfy the request.
   CommandBuffer* cmd_buf = helper_->command_buffer();
   unsigned int chunk_size =
       ((size + chunk_size_multiple_ - 1) / chunk_size_multiple_) *
       chunk_size_multiple_;
   int32 id = -1;
   gpu::Buffer shm = cmd_buf->CreateTransferBuffer(chunk_size, &id);
   if (id < 0)
     return NULL;
   MemoryChunk* mc = new MemoryChunk(id, shm, helper_);
+  allocated_memory_ += mc->GetSize();
   chunks_.push_back(mc);
   void* mem = mc->Alloc(size);
   GPU_DCHECK(mem);
   *shm_id = mc->shm_id();
   *shm_offset = mc->GetOffset(mem);
   return mem;
 }

 void MappedMemoryManager::Free(void* pointer) {
   for (size_t ii = 0; ii < chunks_.size(); ++ii) {
(...skipping 18 matching lines...)
 }

 void MappedMemoryManager::FreeUnused() {
   CommandBuffer* cmd_buf = helper_->command_buffer();
   MemoryChunkVector::iterator iter = chunks_.begin();
   while (iter != chunks_.end()) {
     MemoryChunk* chunk = *iter;
     chunk->FreeUnused();
     if (!chunk->InUse()) {
       cmd_buf->DestroyTransferBuffer(chunk->shm_id());
+      allocated_memory_ -= chunk->GetSize();
       iter = chunks_.erase(iter);
     } else {
       ++iter;
     }
   }
 }

 }  // namespace gpu
-
-
-
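For context, a hypothetical call site exercising only the interfaces visible in this patch. ExampleUsage, the helper pointer, and the 16 MB cap are illustrative values, not part of the change.

// Hypothetical caller; the constructors, Alloc, Free and FreeUnused signatures
// are the ones shown in the diff above.
#include "gpu/command_buffer/client/cmd_buffer_helper.h"
#include "gpu/command_buffer/client/mapped_memory.h"

namespace gpu {

void ExampleUsage(CommandBufferHelper* helper) {
  // Old behavior: no cap on the total size of mapped chunks.
  MappedMemoryManager unbounded(helper);

  // New behavior: once ~16 MB of chunks exist, Alloc() first tries to reclaim
  // space from existing chunks (waiting if necessary) before growing.
  MappedMemoryManager bounded(helper, 16 * 1024 * 1024);

  int32 shm_id = 0;
  unsigned int shm_offset = 0;
  void* mem = bounded.Alloc(2048, &shm_id, &shm_offset);
  if (mem) {
    // ... fill mem, issue a command referencing (shm_id, shm_offset), and
    // make sure the service side is done with it ...
    bounded.Free(mem);     // mark the block reusable
    bounded.FreeUnused();  // drop idle chunks and shrink the allocated total
  }
}

}  // namespace gpu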
