Chromium Code Reviews

Unified Diff: gpu/command_buffer/client/mapped_memory.cc

Issue 23130004: Enforce a memory limit on MappedMemoryManager (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: fix another namespace error (created 7 years, 4 months ago)
 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "gpu/command_buffer/client/mapped_memory.h"

 #include <algorithm>
 #include <functional>

+#include "base/debug/trace_event.h"
 #include "gpu/command_buffer/client/cmd_buffer_helper.h"

 namespace gpu {

 MemoryChunk::MemoryChunk(
     int32 shm_id, gpu::Buffer shm, CommandBufferHelper* helper)
     : shm_id_(shm_id),
       shm_(shm),
       allocator_(shm.size, helper, shm.ptr) {
 }

-MappedMemoryManager::MappedMemoryManager(CommandBufferHelper* helper)
+MappedMemoryManager::MappedMemoryManager(CommandBufferHelper* helper,
+                                         size_t unused_memory_reclaim_limit)
     : chunk_size_multiple_(1),
-      helper_(helper) {
+      helper_(helper),
+      allocated_memory_(0),
+      max_free_bytes_(unused_memory_reclaim_limit) {
 }

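For orientation, here is a minimal caller-side sketch of the new constructor argument. It is illustrative only and not part of this patch: the limit value is made up, and the qualified name MappedMemoryManager::kNoLimit is an assumption based on the comparison against kNoLimit in Alloc() below (the constant is presumably declared in mapped_memory.h).

// Illustrative sketch, not part of the patch. 'helper' is assumed to be
// an existing CommandBufferHelper*.
MappedMemoryManager* limited =
    new MappedMemoryManager(helper, 4 * 1024 * 1024);  // reclaim once >= 4 MB sits idle
MappedMemoryManager* unbounded =
    new MappedMemoryManager(helper, MappedMemoryManager::kNoLimit);  // previous, unlimited behaviour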
 MappedMemoryManager::~MappedMemoryManager() {
   CommandBuffer* cmd_buf = helper_->command_buffer();
   for (MemoryChunkVector::iterator iter = chunks_.begin();
        iter != chunks_.end(); ++iter) {
     MemoryChunk* chunk = *iter;
     cmd_buf->DestroyTransferBuffer(chunk->shm_id());
   }
 }

 void* MappedMemoryManager::Alloc(
     unsigned int size, int32* shm_id, unsigned int* shm_offset) {
   GPU_DCHECK(shm_id);
   GPU_DCHECK(shm_offset);
-  // See if any of the chucks can satisfy this request.
-  for (size_t ii = 0; ii < chunks_.size(); ++ii) {
-    MemoryChunk* chunk = chunks_[ii];
-    chunk->FreeUnused();
-    if (chunk->GetLargestFreeSizeWithoutWaiting() >= size) {
-      void* mem = chunk->Alloc(size);
-      GPU_DCHECK(mem);
-      *shm_id = chunk->shm_id();
-      *shm_offset = chunk->GetOffset(mem);
-      return mem;
+  if (size <= allocated_memory_) {
+    size_t total_bytes_in_use = 0;
+    // See if any of the chunks can satisfy this request.
+    for (size_t ii = 0; ii < chunks_.size(); ++ii) {
+      MemoryChunk* chunk = chunks_[ii];
+      chunk->FreeUnused();
+      total_bytes_in_use += chunk->bytes_in_use();
+      if (chunk->GetLargestFreeSizeWithoutWaiting() >= size) {
+        void* mem = chunk->Alloc(size);
+        GPU_DCHECK(mem);
+        *shm_id = chunk->shm_id();
+        *shm_offset = chunk->GetOffset(mem);
+        return mem;
+      }
+    }
+
+    // If there is a memory limit being enforced and total free
+    // memory (allocated_memory_ - total_bytes_in_use) is larger than
+    // the limit try waiting.
+    if (max_free_bytes_ != kNoLimit &&
+        (allocated_memory_ - total_bytes_in_use) >= max_free_bytes_) {
+      TRACE_EVENT0("gpu", "MappedMemoryManager::Alloc::wait");
+      for (size_t ii = 0; ii < chunks_.size(); ++ii) {
+        MemoryChunk* chunk = chunks_[ii];
+        if (chunk->GetLargestFreeSizeWithWaiting() >= size) {
+          void* mem = chunk->Alloc(size);
+          GPU_DCHECK(mem);
+          *shm_id = chunk->shm_id();
+          *shm_offset = chunk->GetOffset(mem);
+          return mem;
+        }
+      }
     }
   }

   // Make a new chunk to satisfy the request.
   CommandBuffer* cmd_buf = helper_->command_buffer();
   unsigned int chunk_size =
       ((size + chunk_size_multiple_ - 1) / chunk_size_multiple_) *
       chunk_size_multiple_;
   int32 id = -1;
   gpu::Buffer shm = cmd_buf->CreateTransferBuffer(chunk_size, &id);
   if (id < 0)
     return NULL;
   MemoryChunk* mc = new MemoryChunk(id, shm, helper_);
+  allocated_memory_ += mc->GetSize();
   chunks_.push_back(mc);
   void* mem = mc->Alloc(size);
   GPU_DCHECK(mem);
   *shm_id = mc->shm_id();
   *shm_offset = mc->GetOffset(mem);
   return mem;
 }
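A quick worked example of the chunk-size rounding above, with illustrative values that are not from the patch: the expression ((size + chunk_size_multiple_ - 1) / chunk_size_multiple_) * chunk_size_multiple_ rounds size up to the next multiple of chunk_size_multiple_. With chunk_size_multiple_ = 1024 and size = 2500, integer division gives (2500 + 1023) / 1024 = 3 and 3 * 1024 = 3072, so a 3072-byte transfer buffer is requested; a size that is already a multiple, such as 2048, maps to itself, and with the constructor's default multiple of 1 the expression leaves size unchanged.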

 void MappedMemoryManager::Free(void* pointer) {
   for (size_t ii = 0; ii < chunks_.size(); ++ii) {
(...skipping 18 matching lines...)
 }

 void MappedMemoryManager::FreeUnused() {
   CommandBuffer* cmd_buf = helper_->command_buffer();
   MemoryChunkVector::iterator iter = chunks_.begin();
   while (iter != chunks_.end()) {
     MemoryChunk* chunk = *iter;
     chunk->FreeUnused();
     if (!chunk->InUse()) {
       cmd_buf->DestroyTransferBuffer(chunk->shm_id());
+      allocated_memory_ -= chunk->GetSize();
       iter = chunks_.erase(iter);
     } else {
       ++iter;
     }
   }
 }

 }  // namespace gpu
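As a reading aid, the sketch below restates the bookkeeping the new limit relies on, reusing names from the diff; it is a standalone illustration, not part of the patch. allocated_memory_ grows by MemoryChunk::GetSize() when Alloc() creates a chunk and shrinks by the same amount when FreeUnused() destroys one, while the bytes_in_use() totals gathered in Alloc()'s first pass measure what callers still hold, so their difference is memory that is mapped but idle.

// Standalone sketch (assumed helper, not part of the patch) mirroring the
// condition in MappedMemoryManager::Alloc(): only when the idle mapped
// memory reaches the limit does it pay to block on
// GetLargestFreeSizeWithWaiting() instead of creating another chunk.
#include <cstddef>

bool ShouldTryWaiting(size_t allocated_memory,    // sum of chunk GetSize()
                      size_t total_bytes_in_use,  // sum of chunk bytes_in_use()
                      size_t max_free_bytes,      // unused_memory_reclaim_limit
                      size_t no_limit) {          // sentinel, kNoLimit in the real class
  const size_t idle_bytes = allocated_memory - total_bytes_in_use;
  return max_free_bytes != no_limit && idle_bytes >= max_free_bytes;
}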
Other files in this patch: gpu/command_buffer/client/mapped_memory.h, gpu/command_buffer/client/mapped_memory_unittest.cc
