Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(337)

Side by Side Diff: gpu/command_buffer/client/mapped_memory.cc

Issue 23130004: Enforce a memory limit on MappedMemoryManager (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Set chunk size Created 7 years, 4 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "gpu/command_buffer/client/mapped_memory.h" 5 #include "gpu/command_buffer/client/mapped_memory.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 #include <functional> 8 #include <functional>
9 9
10 #include "gpu/command_buffer/client/cmd_buffer_helper.h" 10 #include "gpu/command_buffer/client/cmd_buffer_helper.h"
11 11
12 namespace gpu { 12 namespace gpu {
13 13
14 MemoryChunk::MemoryChunk( 14 MemoryChunk::MemoryChunk(
15 int32 shm_id, gpu::Buffer shm, CommandBufferHelper* helper) 15 int32 shm_id, gpu::Buffer shm, CommandBufferHelper* helper)
16 : shm_id_(shm_id), 16 : shm_id_(shm_id),
17 shm_(shm), 17 shm_(shm),
18 allocator_(shm.size, helper, shm.ptr) { 18 allocator_(shm.size, helper, shm.ptr) {
19 } 19 }
20 20
21 MappedMemoryManager::MappedMemoryManager(CommandBufferHelper* helper) 21 MappedMemoryManager::MappedMemoryManager(CommandBufferHelper* helper,
22 size_t memory_limit)
22 : chunk_size_multiple_(1), 23 : chunk_size_multiple_(1),
23 helper_(helper) { 24 helper_(helper),
25 allocated_memory_(0),
26 max_free_bytes_(memory_limit) {
24 } 27 }
25 28
26 MappedMemoryManager::~MappedMemoryManager() { 29 MappedMemoryManager::~MappedMemoryManager() {
27 CommandBuffer* cmd_buf = helper_->command_buffer(); 30 CommandBuffer* cmd_buf = helper_->command_buffer();
28 for (MemoryChunkVector::iterator iter = chunks_.begin(); 31 for (MemoryChunkVector::iterator iter = chunks_.begin();
29 iter != chunks_.end(); ++iter) { 32 iter != chunks_.end(); ++iter) {
30 MemoryChunk* chunk = *iter; 33 MemoryChunk* chunk = *iter;
31 cmd_buf->DestroyTransferBuffer(chunk->shm_id()); 34 cmd_buf->DestroyTransferBuffer(chunk->shm_id());
32 } 35 }
33 } 36 }
34 37
35 void* MappedMemoryManager::Alloc( 38 void* MappedMemoryManager::Alloc(
36 unsigned int size, int32* shm_id, unsigned int* shm_offset) { 39 unsigned int size, int32* shm_id, unsigned int* shm_offset) {
37 GPU_DCHECK(shm_id); 40 GPU_DCHECK(shm_id);
38 GPU_DCHECK(shm_offset); 41 GPU_DCHECK(shm_offset);
no sievers 2013/08/20 15:59:17 Can you put 'if (size <= allocated_memory_)' around
kaanb 2013/08/20 22:15:46 Done.
39 // See if any of the chucks can satisfy this request. 42 size_t total_bytes_in_use = 0;
43 // See if any of the chunks can satisfy this request.
40 for (size_t ii = 0; ii < chunks_.size(); ++ii) { 44 for (size_t ii = 0; ii < chunks_.size(); ++ii) {
41 MemoryChunk* chunk = chunks_[ii]; 45 MemoryChunk* chunk = chunks_[ii];
42 chunk->FreeUnused(); 46 chunk->FreeUnused();
47 total_bytes_in_use += chunk->bytes_in_use();
43 if (chunk->GetLargestFreeSizeWithoutWaiting() >= size) { 48 if (chunk->GetLargestFreeSizeWithoutWaiting() >= size) {
44 void* mem = chunk->Alloc(size); 49 void* mem = chunk->Alloc(size);
45 GPU_DCHECK(mem); 50 GPU_DCHECK(mem);
46 *shm_id = chunk->shm_id(); 51 *shm_id = chunk->shm_id();
47 *shm_offset = chunk->GetOffset(mem); 52 *shm_offset = chunk->GetOffset(mem);
48 return mem; 53 return mem;
49 } 54 }
50 } 55 }
51 56
57 // If there is a memory limit being enforced and total free
58 // memory (allocated_memory_ - total_bytes_in_use) is larger than
59 // the limit try waiting.
60 if (max_free_bytes_ != kNoLimit &&
61 (allocated_memory_ - total_bytes_in_use) >= max_free_bytes_) {
epenner 2013/08/20 18:02:03 Please add a trace in this if statement.
kaanb 2013/08/20 22:15:46 Done.
62 for (size_t ii = 0; ii < chunks_.size(); ++ii) {
63 MemoryChunk* chunk = chunks_[ii];
64 chunk->FreeUnused();
no sievers 2013/08/20 02:46:54 Should we skip the call to FreeUnused() here since
kaanb 2013/08/20 22:15:46 Done.
65 if (chunk->GetLargestFreeSizeWithWaiting() >= size) {
66 void* mem = chunk->Alloc(size);
67 GPU_DCHECK(mem);
68 *shm_id = chunk->shm_id();
69 *shm_offset = chunk->GetOffset(mem);
70 return mem;
71 }
72 }
73 LOG(INFO) << "Exceeding memory limit: " << max_free_bytes_;
no sievers 2013/08/20 02:46:54 Hmm that might be spammy. How about a trace event
kaanb 2013/08/20 22:15:46 I put the trace event in this method.
74 }
75
52 // Make a new chunk to satisfy the request. 76 // Make a new chunk to satisfy the request.
53 CommandBuffer* cmd_buf = helper_->command_buffer(); 77 CommandBuffer* cmd_buf = helper_->command_buffer();
54 unsigned int chunk_size = 78 unsigned int chunk_size =
55 ((size + chunk_size_multiple_ - 1) / chunk_size_multiple_) * 79 ((size + chunk_size_multiple_ - 1) / chunk_size_multiple_) *
56 chunk_size_multiple_; 80 chunk_size_multiple_;
57 int32 id = -1; 81 int32 id = -1;
58 gpu::Buffer shm = cmd_buf->CreateTransferBuffer(chunk_size, &id); 82 gpu::Buffer shm = cmd_buf->CreateTransferBuffer(chunk_size, &id);
59 if (id < 0) 83 if (id < 0)
60 return NULL; 84 return NULL;
61 MemoryChunk* mc = new MemoryChunk(id, shm, helper_); 85 MemoryChunk* mc = new MemoryChunk(id, shm, helper_);
86 allocated_memory_ += mc->GetSize();
62 chunks_.push_back(mc); 87 chunks_.push_back(mc);
63 void* mem = mc->Alloc(size); 88 void* mem = mc->Alloc(size);
64 GPU_DCHECK(mem); 89 GPU_DCHECK(mem);
65 *shm_id = mc->shm_id(); 90 *shm_id = mc->shm_id();
66 *shm_offset = mc->GetOffset(mem); 91 *shm_offset = mc->GetOffset(mem);
67 return mem; 92 return mem;
68 } 93 }
69 94
70 void MappedMemoryManager::Free(void* pointer) { 95 void MappedMemoryManager::Free(void* pointer) {
71 for (size_t ii = 0; ii < chunks_.size(); ++ii) { 96 for (size_t ii = 0; ii < chunks_.size(); ++ii) {
(...skipping 18 matching lines...) Expand all
90 } 115 }
91 116
92 void MappedMemoryManager::FreeUnused() { 117 void MappedMemoryManager::FreeUnused() {
93 CommandBuffer* cmd_buf = helper_->command_buffer(); 118 CommandBuffer* cmd_buf = helper_->command_buffer();
94 MemoryChunkVector::iterator iter = chunks_.begin(); 119 MemoryChunkVector::iterator iter = chunks_.begin();
95 while (iter != chunks_.end()) { 120 while (iter != chunks_.end()) {
96 MemoryChunk* chunk = *iter; 121 MemoryChunk* chunk = *iter;
97 chunk->FreeUnused(); 122 chunk->FreeUnused();
98 if (!chunk->InUse()) { 123 if (!chunk->InUse()) {
99 cmd_buf->DestroyTransferBuffer(chunk->shm_id()); 124 cmd_buf->DestroyTransferBuffer(chunk->shm_id());
125 allocated_memory_ -= chunk->GetSize();
100 iter = chunks_.erase(iter); 126 iter = chunks_.erase(iter);
101 } else { 127 } else {
102 ++iter; 128 ++iter;
103 } 129 }
104 } 130 }
105 } 131 }
106 132
107 } // namespace gpu 133 } // namespace gpu
108
109
110
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698