OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_ | 5 #ifndef GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_ |
6 #define GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_ | 6 #define GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_ |
7 | 7 |
8 #include "base/memory/scoped_vector.h" | 8 #include "base/memory/scoped_vector.h" |
9 #include "gpu/command_buffer/client/fenced_allocator.h" | 9 #include "gpu/command_buffer/client/fenced_allocator.h" |
10 #include "gpu/command_buffer/common/buffer.h" | 10 #include "gpu/command_buffer/common/buffer.h" |
11 #include "gpu/command_buffer/common/types.h" | 11 #include "gpu/command_buffer/common/types.h" |
12 #include "gpu/gpu_export.h" | 12 #include "gpu/gpu_export.h" |
13 | 13 |
14 namespace gpu { | 14 namespace gpu { |
15 | 15 |
16 class CommandBufferHelper; | 16 class CommandBufferHelper; |
17 | 17 |
18 // Manages a shared memory segment. | 18 // Manages a shared memory segment. |
19 class GPU_EXPORT MemoryChunk { | 19 class GPU_EXPORT MemoryChunk { |
20 public: | 20 public: |
21 MemoryChunk(int32 shm_id, gpu::Buffer shm, CommandBufferHelper* helper); | 21 MemoryChunk(int32 shm_id, |
| 22 gpu::Buffer shm, |
| 23 bool aggressive_reuse, |
| 24 CommandBufferHelper* helper); |
22 | 25 |
23 // Gets the size of the largest free block that is available without waiting. | 26 // Gets the size of the largest free block that is available without waiting. |
24 unsigned int GetLargestFreeSizeWithoutWaiting() { | 27 unsigned int GetLargestFreeSizeWithoutWaiting() { |
25 return allocator_.GetLargestFreeSize(); | 28 return allocator_.GetLargestFreeSize(); |
26 } | 29 } |
27 | 30 |
28 // Gets the size of the largest free block that can be allocated if the | 31 // Gets the size of the largest free block that can be allocated if the |
29 // caller can wait. | 32 // caller can wait. |
30 unsigned int GetLargestFreeSizeWithWaiting() { | 33 unsigned int GetLargestFreeSizeWithWaiting() { |
31 return allocator_.GetLargestFreeOrPendingSize(); | 34 return allocator_.GetLargestFreeOrPendingSize(); |
(...skipping 68 matching lines...) |
100 } | 103 } |
101 | 104 |
102 private: | 105 private: |
103 int32 shm_id_; | 106 int32 shm_id_; |
104 gpu::Buffer shm_; | 107 gpu::Buffer shm_; |
105 FencedAllocatorWrapper allocator_; | 108 FencedAllocatorWrapper allocator_; |
106 | 109 |
107 DISALLOW_COPY_AND_ASSIGN(MemoryChunk); | 110 DISALLOW_COPY_AND_ASSIGN(MemoryChunk); |
108 }; | 111 }; |
109 | 112 |
| 113 // Settings used to configure a MappedMemoryManager. |
| 114 struct GPU_EXPORT MappedMemoryManagerSettings { |
| 115 // |unused_memory_reclaim_limit|: When this limit is exceeded, pending |
| 116 // memory is reclaimed before more memory is allocated. |
| 117 size_t unused_memory_reclaim_limit; |
| 118 |
| 119 // |aggressive_reuse|: When set to true, the memory manager will reuse |
| 120 // blocks marked either FREE or FREE_PENDING_TOKEN. This setting is only |
| 121 // safe if the user can guarantee that nothing is still using a block by |
| 122 // the time the client frees it. |
| 123 bool aggressive_reuse; |
| 124 }; |
| 125 |
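A minimal sketch of filling in the new struct (the field names come from this patch; the values and surrounding context are hypothetical):

  MappedMemoryManagerSettings settings;
  // Reclaim pending memory once more than ~10 MB sits unused (hypothetical
  // limit; MappedMemoryManager::kNoLimit would disable reclaiming).
  settings.unused_memory_reclaim_limit = 10 * 1024 * 1024;
  // Only safe when the client can guarantee that nothing still uses a block
  // by the time the client frees it, per the comment above.
  settings.aggressive_reuse = false;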
110 // Manages MemoryChunks. | 126 // Manages MemoryChunks. |
111 class GPU_EXPORT MappedMemoryManager { | 127 class GPU_EXPORT MappedMemoryManager { |
112 public: | 128 public: |
113 enum MemoryLimit { | 129 enum MemoryLimit { |
114 kNoLimit = 0, | 130 kNoLimit = 0, |
115 }; | 131 }; |
116 | 132 |
117 // |unused_memory_reclaim_limit|: When exceeded this causes pending memory | |
118 // to be reclaimed before allocating more memory. | |
119 MappedMemoryManager(CommandBufferHelper* helper, | 133 MappedMemoryManager(CommandBufferHelper* helper, |
120 size_t unused_memory_reclaim_limit); | 134 MappedMemoryManagerSettings settings); |
121 | 135 |
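With settings built as sketched above, the new two-argument constructor replaces the old (helper, reclaim-limit) form; helper here is assumed to be an existing CommandBufferHelper*:

  MappedMemoryManager manager(helper, settings);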
122 ~MappedMemoryManager(); | 136 ~MappedMemoryManager(); |
123 | 137 |
124 unsigned int chunk_size_multiple() const { | 138 unsigned int chunk_size_multiple() const { |
125 return chunk_size_multiple_; | 139 return chunk_size_multiple_; |
126 } | 140 } |
127 | 141 |
128 void set_chunk_size_multiple(unsigned int multiple) { | 142 void set_chunk_size_multiple(unsigned int multiple) { |
129 chunk_size_multiple_ = multiple; | 143 chunk_size_multiple_ = multiple; |
130 } | 144 } |
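Per the member comment further down, chunk sizes are rounded up to this multiple; a hypothetical page-aligned configuration:

  manager.set_chunk_size_multiple(4096);  // round new chunk sizes up to 4 KB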
(...skipping 37 matching lines...) |
168 | 182 |
169 private: | 183 private: |
170 typedef ScopedVector<MemoryChunk> MemoryChunkVector; | 184 typedef ScopedVector<MemoryChunk> MemoryChunkVector; |
171 | 185 |
172 // The multiple that chunk sizes are rounded up to. | 186 // The multiple that chunk sizes are rounded up to. |
173 unsigned int chunk_size_multiple_; | 187 unsigned int chunk_size_multiple_; |
174 CommandBufferHelper* helper_; | 188 CommandBufferHelper* helper_; |
175 MemoryChunkVector chunks_; | 189 MemoryChunkVector chunks_; |
176 size_t allocated_memory_; | 190 size_t allocated_memory_; |
177 size_t max_free_bytes_; | 191 size_t max_free_bytes_; |
| 192 bool aggressive_reuse_; |
178 | 193 |
179 DISALLOW_COPY_AND_ASSIGN(MappedMemoryManager); | 194 DISALLOW_COPY_AND_ASSIGN(MappedMemoryManager); |
180 }; | 195 }; |
181 | 196 |
182 } // namespace gpu | 197 } // namespace gpu |
183 | 198 |
184 #endif // GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_ | 199 #endif // GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_ |
185 | 200 |