| OLD | NEW |
| 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "gpu/command_buffer/client/mapped_memory.h" | 5 #include "gpu/command_buffer/client/mapped_memory.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <functional> | 8 #include <functional> |
| 9 | 9 |
| 10 #include "base/logging.h" | 10 #include "base/logging.h" |
| (...skipping 12 matching lines...) |
| 23 | 23 |
| 24 MemoryChunk::~MemoryChunk() {} | 24 MemoryChunk::~MemoryChunk() {} |
| 25 | 25 |
| 26 MappedMemoryManager::MappedMemoryManager(CommandBufferHelper* helper, | 26 MappedMemoryManager::MappedMemoryManager(CommandBufferHelper* helper, |
| 27 const base::Closure& poll_callback, | 27 const base::Closure& poll_callback, |
| 28 size_t unused_memory_reclaim_limit) | 28 size_t unused_memory_reclaim_limit) |
| 29 : chunk_size_multiple_(FencedAllocator::kAllocAlignment), | 29 : chunk_size_multiple_(FencedAllocator::kAllocAlignment), |
| 30 helper_(helper), | 30 helper_(helper), |
| 31 poll_callback_(poll_callback), | 31 poll_callback_(poll_callback), |
| 32 allocated_memory_(0), | 32 allocated_memory_(0), |
| 33 max_free_bytes_(unused_memory_reclaim_limit) { | 33 max_free_bytes_(unused_memory_reclaim_limit), |
| 34 max_allocated_bytes_(kNoLimit) { |
| 34 } | 35 } |
| 35 | 36 |
| 36 MappedMemoryManager::~MappedMemoryManager() { | 37 MappedMemoryManager::~MappedMemoryManager() { |
| 37 CommandBuffer* cmd_buf = helper_->command_buffer(); | 38 CommandBuffer* cmd_buf = helper_->command_buffer(); |
| 38 for (MemoryChunkVector::iterator iter = chunks_.begin(); | 39 for (MemoryChunkVector::iterator iter = chunks_.begin(); |
| 39 iter != chunks_.end(); ++iter) { | 40 iter != chunks_.end(); ++iter) { |
| 40 MemoryChunk* chunk = *iter; | 41 MemoryChunk* chunk = *iter; |
| 41 cmd_buf->DestroyTransferBuffer(chunk->shm_id()); | 42 cmd_buf->DestroyTransferBuffer(chunk->shm_id()); |
| 42 } | 43 } |
| 43 } | 44 } |
| (...skipping 30 matching lines...) |
| 74 void* mem = chunk->Alloc(size); | 75 void* mem = chunk->Alloc(size); |
| 75 DCHECK(mem); | 76 DCHECK(mem); |
| 76 *shm_id = chunk->shm_id(); | 77 *shm_id = chunk->shm_id(); |
| 77 *shm_offset = chunk->GetOffset(mem); | 78 *shm_offset = chunk->GetOffset(mem); |
| 78 return mem; | 79 return mem; |
| 79 } | 80 } |
| 80 } | 81 } |
| 81 } | 82 } |
| 82 } | 83 } |
| 83 | 84 |
| 85 if (max_allocated_bytes_ != kNoLimit && |
| 86 (allocated_memory_ + size) > max_allocated_bytes_) { |
| 87 return nullptr; |
| 88 } |
| 89 |
| 84 // Make a new chunk to satisfy the request. | 90 // Make a new chunk to satisfy the request. |
| 85 CommandBuffer* cmd_buf = helper_->command_buffer(); | 91 CommandBuffer* cmd_buf = helper_->command_buffer(); |
| 86 unsigned int chunk_size = | 92 unsigned int chunk_size = |
| 87 ((size + chunk_size_multiple_ - 1) / chunk_size_multiple_) * | 93 ((size + chunk_size_multiple_ - 1) / chunk_size_multiple_) * |
| 88 chunk_size_multiple_; | 94 chunk_size_multiple_; |
| 89 int32 id = -1; | 95 int32 id = -1; |
| 90 scoped_refptr<gpu::Buffer> shm = | 96 scoped_refptr<gpu::Buffer> shm = |
| 91 cmd_buf->CreateTransferBuffer(chunk_size, &id); | 97 cmd_buf->CreateTransferBuffer(chunk_size, &id); |
| 92 if (id < 0) | 98 if (id < 0) |
| 93 return NULL; | 99 return NULL; |
| (...skipping 39 matching lines...) |
| 133 if (!chunk->InUse()) { | 139 if (!chunk->InUse()) { |
| 134 cmd_buf->DestroyTransferBuffer(chunk->shm_id()); | 140 cmd_buf->DestroyTransferBuffer(chunk->shm_id()); |
| 135 allocated_memory_ -= chunk->GetSize(); | 141 allocated_memory_ -= chunk->GetSize(); |
| 136 iter = chunks_.erase(iter); | 142 iter = chunks_.erase(iter); |
| 137 } else { | 143 } else { |
| 138 ++iter; | 144 ++iter; |
| 139 } | 145 } |
| 140 } | 146 } |
| 141 } | 147 } |
| 142 | 148 |
| 149 void ScopedMappedMemoryPtr::Release() { |
| 150 if (buffer_) { |
| 151 mapped_memory_manager_->FreePendingToken(buffer_, helper_->InsertToken()); |
| 152 buffer_ = nullptr; |
| 153 size_ = 0; |
| 154 shm_id_ = 0; |
| 155 shm_offset_ = 0; |
| 156 |
| 157 if (flush_after_release_) |
| 158 helper_->CommandBufferHelper::Flush(); |
| 159 } |
| 160 } |
| 161 |
| 162 void ScopedMappedMemoryPtr::Reset(uint32_t new_size) { |
| 163 Release(); |
| 164 |
| 165 if (new_size) { |
| 166 buffer_ = mapped_memory_manager_->Alloc(new_size, &shm_id_, &shm_offset_); |
| 167 size_ = buffer_ ? new_size : 0; |
| 168 } |
| 169 } |
| 170 |
| 143 } // namespace gpu | 171 } // namespace gpu |
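
Note for reviewers: the substance of the Alloc() change is the new hard cap on total mapped memory. Below is a minimal standalone sketch of that guard together with the pre-existing chunk-size rounding, useful for eyeballing the arithmetic in isolation. The constant values, the WouldExceedCap/RoundToChunkMultiple names, and the assumption that kNoLimit is a zero sentinel are illustrative stand-ins, not part of the patch.

    #include <cstddef>

    // Illustrative stand-ins: the real values come from
    // MappedMemoryManager::kNoLimit and FencedAllocator::kAllocAlignment.
    const size_t kNoLimit = 0;
    const size_t kChunkSizeMultiple = 16;

    // Mirrors the guard added to MappedMemoryManager::Alloc(): when a cap
    // is set, refuse a request that would push the running total past it.
    bool WouldExceedCap(size_t allocated, size_t request, size_t cap) {
      return cap != kNoLimit && (allocated + request) > cap;
    }

    // Mirrors the existing chunk sizing: round the request up to the next
    // multiple of the chunk size so new chunks keep the allocator's
    // alignment.
    size_t RoundToChunkMultiple(size_t size) {
      return ((size + kChunkSizeMultiple - 1) / kChunkSizeMultiple) *
             kChunkSizeMultiple;
    }

For example, with a cap of 64 bytes and 48 bytes already allocated, a 32-byte request fails the guard (48 + 32 > 64), so Alloc() returns early instead of creating a new chunk.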