Chromium Code Reviews

Unified Diff: gpu/command_buffer/client/mapped_memory.cc

Issue 23130004: Enforce a memory limit on MappedMemoryManager (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Minor fixes Created 7 years, 4 months ago
Index: gpu/command_buffer/client/mapped_memory.cc
diff --git a/gpu/command_buffer/client/mapped_memory.cc b/gpu/command_buffer/client/mapped_memory.cc
index 82829d48ec1cda6fd7534f596b95acbce03b8522..8747fe5cb2951d5c2d4eec2c30ba8340ffc3aa8d 100644
--- a/gpu/command_buffer/client/mapped_memory.cc
+++ b/gpu/command_buffer/client/mapped_memory.cc
@@ -7,6 +7,10 @@
#include <algorithm>
#include <functional>
+#if defined(OS_ANDROID)
+#include "base/android/sys_utils.h"
+#endif
+
#include "gpu/command_buffer/client/cmd_buffer_helper.h"
namespace gpu {
@@ -20,7 +24,14 @@ MemoryChunk::MemoryChunk(
MappedMemoryManager::MappedMemoryManager(CommandBufferHelper* helper)
: chunk_size_multiple_(1),
- helper_(helper) {
+ helper_(helper),
+ allocated_memory_(0),
+ memory_limit_(kNoLimit) {
+#if defined(OS_ANDROID)
no sievers 2013/08/14 02:05:52: can we avoid ifdefs? Can't we just pass in the limit?
+ if (base::android::SysUtils::IsLowEndDevice()) {
+ memory_limit_ = kAndroidLowEndLimit;
+ }
+#endif
}
MappedMemoryManager::~MappedMemoryManager() {
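
For reference, a minimal sketch of the alternative the reviewer raises above: keep MappedMemoryManager free of platform #ifdefs by taking the limit as a constructor argument and leaving the low-end-device check to the caller, which is already platform-aware. The two-argument constructor and the call site below are hypothetical and not part of this patch:

// Hypothetical constructor that accepts the limit directly (sketch only).
MappedMemoryManager::MappedMemoryManager(CommandBufferHelper* helper,
                                         size_t memory_limit)
    : chunk_size_multiple_(1),
      helper_(helper),
      allocated_memory_(0),
      memory_limit_(memory_limit) {
}

// Hypothetical call site: the #ifdef moves out of the manager into code
// that is already platform-specific (the limit constant would move too).
size_t limit = MappedMemoryManager::kNoLimit;
#if defined(OS_ANDROID)
if (base::android::SysUtils::IsLowEndDevice())
  limit = kAndroidLowEndLimit;
#endif
mapped_memory_.reset(new MappedMemoryManager(helper_, limit));
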
@@ -36,7 +47,7 @@ void* MappedMemoryManager::Alloc(
unsigned int size, int32* shm_id, unsigned int* shm_offset) {
GPU_DCHECK(shm_id);
GPU_DCHECK(shm_offset);
- // See if any of the chucks can satisfy this request.
+ // See if any of the chunks can satisfy this request.
for (size_t ii = 0; ii < chunks_.size(); ++ii) {
MemoryChunk* chunk = chunks_[ii];
chunk->FreeUnused();
@@ -49,6 +60,22 @@ void* MappedMemoryManager::Alloc(
}
}
+ // If there is a memory limit being enforced and allocation of
+ // a new chunk would exceed the limit, try again with waiting.
+ if (memory_limit_ != kNoLimit && (allocated_memory_ + size) > memory_limit_) {
+ for (size_t ii = 0; ii < chunks_.size(); ++ii) {
+ MemoryChunk* chunk = chunks_[ii];
+ chunk->FreeUnused();
+ if (chunk->GetLargestFreeSizeWithWaiting() >= size) {
+ void* mem = chunk->Alloc(size);
+ GPU_DCHECK(mem);
+ *shm_id = chunk->shm_id();
+ *shm_offset = chunk->GetOffset(mem);
+ return mem;
+ }
+ }
+ }
+
// Make a new chunk to satisfy the request.
CommandBuffer* cmd_buf = helper_->command_buffer();
unsigned int chunk_size =
@@ -59,6 +86,7 @@ void* MappedMemoryManager::Alloc(
if (id < 0)
return NULL;
MemoryChunk* mc = new MemoryChunk(id, shm, helper_);
+ allocated_memory_ += mc->GetSize();
chunks_.push_back(mc);
void* mem = mc->Alloc(size);
GPU_DCHECK(mem);
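
Condensed into a standalone model (illustrative names and types only, not the actual gpu:: classes), the allocation policy this hunk introduces is: reuse free chunk space without waiting when possible; fall back to waiting on in-flight commands only when creating a new chunk would push total mapped memory past the limit; and otherwise grow by creating a new chunk, even past the limit, which makes the limit a soft one:

// Standalone sketch of the Alloc() decision logic added by this patch.
// Simplified model for illustration; this is not the real class.
#include <cstddef>

enum class AllocStrategy { kReuseFree, kReuseWithWaiting, kNewChunk };

AllocStrategy ChooseStrategy(std::size_t size,
                             std::size_t largest_free_without_waiting,
                             std::size_t largest_free_with_waiting,
                             std::size_t allocated_memory,
                             std::size_t memory_limit) {  // 0 means no limit
  // Fast path: some existing chunk already has enough free space.
  if (largest_free_without_waiting >= size)
    return AllocStrategy::kReuseFree;
  // Only when a limit is set and would be exceeded do we pay the cost of
  // waiting for pending commands to free space in an existing chunk.
  if (memory_limit != 0 && allocated_memory + size > memory_limit &&
      largest_free_with_waiting >= size)
    return AllocStrategy::kReuseWithWaiting;
  // Soft limit: if waiting cannot satisfy the request either, a new chunk
  // is still created and allocated_memory grows past the limit.
  return AllocStrategy::kNewChunk;
}
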
@@ -97,6 +125,7 @@ void MappedMemoryManager::FreeUnused() {
chunk->FreeUnused();
if (!chunk->InUse()) {
cmd_buf->DestroyTransferBuffer(chunk->shm_id());
+ allocated_memory_ -= chunk->GetSize();
iter = chunks_.erase(iter);
} else {
++iter;
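
The single line added to FreeUnused() is the counterpart of the allocated_memory_ increment in Alloc(): the counter is intended to always equal the combined size of the live chunks. A trivial self-contained illustration of that invariant, with plain counters standing in for the real chunk objects (nothing here is Chromium API):

// Simplified model of the allocated_memory_ bookkeeping in this patch:
// a chunk's size is added when the chunk is created and subtracted when
// FreeUnused() destroys it, so the counter tracks the live chunks' total.
#include <cassert>
#include <cstddef>
#include <vector>

int main() {
  std::vector<std::size_t> chunk_sizes;  // stand-in for chunks_
  std::size_t allocated_memory = 0;      // stand-in for allocated_memory_

  // Alloc() path: a new chunk is created.
  chunk_sizes.push_back(1024 * 1024);
  allocated_memory += chunk_sizes.back();

  // FreeUnused() path: an idle chunk is destroyed.
  allocated_memory -= chunk_sizes.back();
  chunk_sizes.pop_back();

  assert(allocated_memory == 0);  // invariant: sum of live chunk sizes
  return 0;
}
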