Chromium Code Reviews

Side by Side Diff: gpu/command_buffer/client/mapped_memory.h

Issue 23130004: Enforce a memory limit on MappedMemoryManager (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Remove unused enum Created 7 years, 4 months ago
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_
#define GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_

#include "base/memory/scoped_vector.h"
#include "gpu/command_buffer/client/fenced_allocator.h"
#include "gpu/command_buffer/common/buffer.h"
(...skipping 61 matching lines...)
  // Frees a block of memory, pending the passage of a token. That memory won't
  // be re-allocated until the token has passed through the command stream.
  //
  // Parameters:
  //   pointer: the pointer to the memory block to free.
  //   token: the token value to wait for before re-using the memory.
  void FreePendingToken(void* pointer, unsigned int token) {
    allocator_.FreePendingToken(pointer, token);
  }

  // Frees any blocks whose tokens have passed.
  void FreeUnused() {
    allocator_.FreeUnused();
  }

  // Returns true if pointer is in the range of this block.
  bool IsInChunk(void* pointer) const {
    return pointer >= shm_.ptr &&
           pointer < reinterpret_cast<const int8*>(shm_.ptr) + shm_.size;
  }
  // Returns true if any memory in this chunk is in use.
  bool InUse() {
    return allocator_.InUse();
  }

 private:
  int32 shm_id_;
  gpu::Buffer shm_;
  FencedAllocatorWrapper allocator_;

  DISALLOW_COPY_AND_ASSIGN(MemoryChunk);
};

// Manages MemoryChunks.
class GPU_EXPORT MappedMemoryManager {
 public:
  explicit MappedMemoryManager(CommandBufferHelper* helper);
  explicit MappedMemoryManager(CommandBufferHelper* helper,
                               size_t memory_limit);

piman 2013/08/15 02:49:03  No explicit. Do we really need 2 constructors? Can…
kaanb 2013/08/16 22:50:44  Removed the explicit. I'd prefer to keep this patc…
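A hypothetical usage sketch of the two constructors declared above, not part of the patch itself: |helper| is assumed to be an already-initialized CommandBufferHelper*, and the 1 MB cap is just an illustrative value.

  // Sketch only, assuming a valid CommandBufferHelper* named |helper|.
  gpu::MappedMemoryManager unlimited(helper);              // no cap, old behavior
  gpu::MappedMemoryManager limited(helper, 1024 * 1024);   // cap mapped memory at ~1 MB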

  ~MappedMemoryManager();

  unsigned int chunk_size_multiple() const {
    return chunk_size_multiple_;
  }

  void set_chunk_size_multiple(unsigned int multiple) {
    chunk_size_multiple_ = multiple;
  }
(...skipping 24 matching lines...)
  // Free any shared memory that is not in use.
  void FreeUnused();

  // Used for testing.
  size_t num_chunks() {
    return chunks_.size();
  }

 private:
  enum MemoryLimit {
    kNoLimit = 0,
  };
  typedef ScopedVector<MemoryChunk> MemoryChunkVector;

  // size a chunk is rounded up to.
  unsigned int chunk_size_multiple_;
  CommandBufferHelper* helper_;
  MemoryChunkVector chunks_;
  size_t allocated_memory_;
  size_t memory_limit_;

  DISALLOW_COPY_AND_ASSIGN(MappedMemoryManager);
};

}  // namespace gpu

#endif  // GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_
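To make the token-based freeing described in the comments above concrete, here is a minimal usage sketch. Alloc() and CommandBufferHelper::InsertToken() are declared in the portions of the header elided above and in cmd_buffer_helper.h, so the exact signatures below are assumptions rather than quotations of this patch.

// Sketch only; assumes |helper| and |mapped_memory| are already set up and
// that Alloc() returns a client pointer plus a shared memory id/offset.
void UploadAndRelease(gpu::CommandBufferHelper* helper,
                      gpu::MappedMemoryManager* mapped_memory) {
  int32 shm_id = 0;
  unsigned int shm_offset = 0;
  void* mem = mapped_memory->Alloc(1024, &shm_id, &shm_offset);
  // ... fill |mem| and issue commands that reference (shm_id, shm_offset) ...
  int32 token = helper->InsertToken();
  // The block must not be reused until the token has passed through the
  // command stream, so hand it back as a pending free rather than a free.
  mapped_memory->FreePendingToken(mem, token);
  // Blocks whose tokens have already passed can then be reclaimed.
  mapped_memory->FreeUnused();
}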
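The new allocated_memory_ and memory_limit_ members imply that the manager tracks the total size of its chunks against the configured cap. The enforcement itself lives in mapped_memory.cc, which is not part of this diff; the following is only a guess at the general shape of such a check, with a made-up helper name (WouldExceedLimit), not code from the CL.

// Hypothetical sketch: treat a limit of 0 as "no limit", and reclaim idle
// chunks before growing past the cap.
bool WouldExceedLimit(size_t allocated_memory, size_t memory_limit,
                      size_t new_chunk_size) {
  return memory_limit != 0 &&
         allocated_memory + new_chunk_size > memory_limit;
}

// Inside MappedMemoryManager::Alloc(), roughly:
//   if (WouldExceedLimit(allocated_memory_, memory_limit_, chunk_size))
//     FreeUnused();                    // drop chunks whose tokens passed
//   allocated_memory_ += chunk_size;   // when a new chunk is actually created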