Chromium Code Reviews

Unified Diff: gpu/command_buffer/client/mapped_memory.cc

Issue 116863003: gpu: Reuse transfer buffers more aggressively (Closed) Base URL: http://git.chromium.org/chromium/src.git@master
Patch Set: Created 7 years ago
 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "gpu/command_buffer/client/mapped_memory.h"

 #include <algorithm>
 #include <functional>

 #include "base/debug/trace_event.h"
 #include "base/logging.h"
 #include "gpu/command_buffer/client/cmd_buffer_helper.h"

 namespace gpu {

-MemoryChunk::MemoryChunk(
-    int32 shm_id, gpu::Buffer shm, CommandBufferHelper* helper)
+MemoryChunk::MemoryChunk(int32 shm_id,
+                         gpu::Buffer shm,
+                         bool aggressive_reuse,
+                         CommandBufferHelper* helper)
     : shm_id_(shm_id),
       shm_(shm),
-      allocator_(shm.size, helper, shm.ptr) {
+      allocator_(shm.size, aggressive_reuse, helper, shm.ptr) {
 }

 MappedMemoryManager::MappedMemoryManager(CommandBufferHelper* helper,
-                                         size_t unused_memory_reclaim_limit)
+                                         MappedMemoryManagerSettings settings)
     : chunk_size_multiple_(1),
       helper_(helper),
       allocated_memory_(0),
-      max_free_bytes_(unused_memory_reclaim_limit) {
+      max_free_bytes_(settings.unused_memory_reclaim_limit),
+      aggressive_reuse_(settings.aggressive_reuse) {
 }

 MappedMemoryManager::~MappedMemoryManager() {
   CommandBuffer* cmd_buf = helper_->command_buffer();
   for (MemoryChunkVector::iterator iter = chunks_.begin();
        iter != chunks_.end(); ++iter) {
     MemoryChunk* chunk = *iter;
     cmd_buf->DestroyTransferBuffer(chunk->shm_id());
   }
 }

 void* MappedMemoryManager::Alloc(
     unsigned int size, int32* shm_id, unsigned int* shm_offset) {
   DCHECK(shm_id);
   DCHECK(shm_offset);
   if (size <= allocated_memory_) {
     size_t total_bytes_in_use = 0;
     // See if any of the chunks can satisfy this request.
     for (size_t ii = 0; ii < chunks_.size(); ++ii) {
       MemoryChunk* chunk = chunks_[ii];
       chunk->FreeUnused();
       total_bytes_in_use += chunk->bytes_in_use();
-      if (chunk->GetLargestFreeSizeWithoutWaiting() >= size) {
+      unsigned int largest_size =
+          aggressive_reuse_ ? chunk->GetLargestFreeSizeWithWaiting()
+                            : chunk->GetLargestFreeSizeWithoutWaiting();
+      if (largest_size >= size) {
         void* mem = chunk->Alloc(size);
         DCHECK(mem);
         *shm_id = chunk->shm_id();
         *shm_offset = chunk->GetOffset(mem);
         return mem;
       }
     }

     // If there is a memory limit being enforced and total free
     // memory (allocated_memory_ - total_bytes_in_use) is larger than
     // the limit try waiting.
-    if (max_free_bytes_ != kNoLimit &&
+    if (!aggressive_reuse_ && max_free_bytes_ != kNoLimit &&
         (allocated_memory_ - total_bytes_in_use) >= max_free_bytes_) {
       TRACE_EVENT0("gpu", "MappedMemoryManager::Alloc::wait");
       for (size_t ii = 0; ii < chunks_.size(); ++ii) {
         MemoryChunk* chunk = chunks_[ii];
         if (chunk->GetLargestFreeSizeWithWaiting() >= size) {
           void* mem = chunk->Alloc(size);
           DCHECK(mem);
           *shm_id = chunk->shm_id();
           *shm_offset = chunk->GetOffset(mem);
           return mem;
         }
       }
     }
   }

   // Make a new chunk to satisfy the request.
   CommandBuffer* cmd_buf = helper_->command_buffer();
   unsigned int chunk_size =
       ((size + chunk_size_multiple_ - 1) / chunk_size_multiple_) *
       chunk_size_multiple_;
   int32 id = -1;
   gpu::Buffer shm = cmd_buf->CreateTransferBuffer(chunk_size, &id);
   if (id < 0)
     return NULL;
-  MemoryChunk* mc = new MemoryChunk(id, shm, helper_);
+  MemoryChunk* mc = new MemoryChunk(id, shm, aggressive_reuse_, helper_);
   allocated_memory_ += mc->GetSize();
   chunks_.push_back(mc);
   void* mem = mc->Alloc(size);
   DCHECK(mem);
   *shm_id = mc->shm_id();
   *shm_offset = mc->GetOffset(mem);
   return mem;
 }

 void MappedMemoryManager::Free(void* pointer) {
(...skipping 28 matching lines...)
       cmd_buf->DestroyTransferBuffer(chunk->shm_id());
       allocated_memory_ -= chunk->GetSize();
       iter = chunks_.erase(iter);
     } else {
       ++iter;
     }
   }
 }

 }  // namespace gpu
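The MappedMemoryManager constructor now takes a MappedMemoryManagerSettings struct in place of the bare unused_memory_reclaim_limit argument. The struct itself is defined in gpu/command_buffer/client/mapped_memory.h, which is not part of this file's diff; a minimal sketch consistent with the two fields referenced above might look like this:

// Sketch only: the real definition lives in mapped_memory.h and is
// not shown in this diff. Field names are taken from their uses in
// the constructor above.
struct MappedMemoryManagerSettings {
  // Reclaim chunks once total free memory exceeds this many bytes
  // (kNoLimit disables the limit).
  size_t unused_memory_reclaim_limit;
  // When true, Alloc() is willing to wait on in-flight tokens so
  // freed regions of existing chunks can be reused before a new
  // chunk is allocated.
  bool aggressive_reuse;
};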
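With aggressive_reuse_ set, the first loop in Alloc() checks GetLargestFreeSizeWithWaiting() instead of GetLargestFreeSizeWithoutWaiting(), so the client will block on the command buffer to recycle a pending-free region of an existing chunk rather than grow a new one; the later wait-on-limit fallback loop becomes redundant in that mode and is skipped. A hypothetical call site opting in could look like the following (the helper variable and settings values are illustrative, not taken from this CL):

// Hypothetical usage sketch; values are illustrative only.
MappedMemoryManagerSettings settings;
settings.unused_memory_reclaim_limit = 4 * 1024 * 1024;  // e.g. reclaim past 4 MB free
settings.aggressive_reuse = true;  // prefer waiting for reuse over growing
MappedMemoryManager manager(helper, settings);  // helper: a CommandBufferHelper*

int32 shm_id = -1;
unsigned int shm_offset = 0;
// May block on the GPU to reuse freed space in an existing chunk.
void* mem = manager.Alloc(1024, &shm_id, &shm_offset);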
