| OLD | NEW |
| (Empty) |
| 1 // Copyright 2014 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "content/common/host_discardable_shared_memory_manager.h" | |
| 6 | |
| 7 #include <algorithm> | |
| 8 #include <utility> | |
| 9 | |
| 10 #include "base/atomic_sequence_num.h" | |
| 11 #include "base/bind.h" | |
| 12 #include "base/callback.h" | |
| 13 #include "base/debug/crash_logging.h" | |
| 14 #include "base/lazy_instance.h" | |
| 15 #include "base/macros.h" | |
| 16 #include "base/memory/discardable_memory.h" | |
| 17 #include "base/memory/memory_coordinator_client_registry.h" | |
| 18 #include "base/memory/ptr_util.h" | |
| 19 #include "base/numerics/safe_math.h" | |
| 20 #include "base/process/memory.h" | |
| 21 #include "base/strings/string_number_conversions.h" | |
| 22 #include "base/strings/stringprintf.h" | |
| 23 #include "base/sys_info.h" | |
| 24 #include "base/threading/thread_task_runner_handle.h" | |
| 25 #include "base/trace_event/memory_allocator_dump.h" | |
| 26 #include "base/trace_event/memory_dump_manager.h" | |
| 27 #include "base/trace_event/process_memory_dump.h" | |
| 28 #include "base/trace_event/trace_event.h" | |
| 29 #include "build/build_config.h" | |
| 30 #include "content/common/child_process_host_impl.h" | |
| 31 #include "content/common/discardable_shared_memory_heap.h" | |
| 32 #include "content/public/common/child_process_host.h" | |
| 33 | |
| 34 #if defined(OS_LINUX) | |
| 35 #include "base/files/file_path.h" | |
| 36 #include "base/files/file_util.h" | |
| 37 #include "base/metrics/histogram_macros.h" | |
| 38 #endif | |
| 39 | |
| 40 namespace content { | |
| 41 namespace { | |
| 42 | |
| 43 class DiscardableMemoryImpl : public base::DiscardableMemory { | |
| 44 public: | |
| 45 DiscardableMemoryImpl( | |
| 46 std::unique_ptr<base::DiscardableSharedMemory> shared_memory, | |
| 47 const base::Closure& deleted_callback) | |
| 48 : shared_memory_(std::move(shared_memory)), | |
| 49 deleted_callback_(deleted_callback), | |
| 50 is_locked_(true) {} | |
| 51 | |
| 52 ~DiscardableMemoryImpl() override { | |
| 53 if (is_locked_) | |
| 54 shared_memory_->Unlock(0, 0); | |
| 55 | |
| 56 deleted_callback_.Run(); | |
| 57 } | |
| 58 | |
| 59 // Overridden from base::DiscardableMemory: | |
| 60 bool Lock() override { | |
| 61 DCHECK(!is_locked_); | |
| 62 | |
| 63 if (shared_memory_->Lock(0, 0) != base::DiscardableSharedMemory::SUCCESS) | |
| 64 return false; | |
| 65 | |
| 66 is_locked_ = true; | |
| 67 return true; | |
| 68 } | |
| 69 void Unlock() override { | |
| 70 DCHECK(is_locked_); | |
| 71 | |
| 72 shared_memory_->Unlock(0, 0); | |
| 73 is_locked_ = false; | |
| 74 } | |
| 75 void* data() const override { | |
| 76 DCHECK(is_locked_); | |
| 77 return shared_memory_->memory(); | |
| 78 } | |
| 79 | |
| 80 base::trace_event::MemoryAllocatorDump* CreateMemoryAllocatorDump( | |
| 81 const char* name, | |
| 82 base::trace_event::ProcessMemoryDump* pmd) const override { | |
| 83 // The memory could have been purged, but we still create a dump with | |
| 84 // mapped_size. So, the size can be inaccurate. | |
| 85 base::trace_event::MemoryAllocatorDump* dump = | |
| 86 pmd->CreateAllocatorDump(name); | |
| 87 dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize, | |
| 88 base::trace_event::MemoryAllocatorDump::kUnitsBytes, | |
| 89 shared_memory_->mapped_size()); | |
| 90 return dump; | |
| 91 } | |
| 92 | |
| 93 private: | |
| 94 std::unique_ptr<base::DiscardableSharedMemory> shared_memory_; | |
| 95 const base::Closure deleted_callback_; | |
| 96 bool is_locked_; | |
| 97 | |
| 98 DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryImpl); | |
| 99 }; | |
| 100 | |
| 101 // Returns the default memory limit to use for discardable memory, taking | |
| 102 // the amount physical memory available and other platform specific constraints | |
| 103 // into account. | |
| 104 int64_t GetDefaultMemoryLimit() { | |
| 105 const int kMegabyte = 1024 * 1024; | |
| 106 | |
| 107 #if defined(OS_ANDROID) | |
| 108 // Limits the number of FDs used to 32, assuming a 4MB allocation size. | |
| 109 int64_t max_default_memory_limit = 128 * kMegabyte; | |
| 110 #else | |
| 111 int64_t max_default_memory_limit = 512 * kMegabyte; | |
| 112 #endif | |
| 113 | |
| 114 // Use 1/8th of discardable memory on low-end devices. | |
| 115 if (base::SysInfo::IsLowEndDevice()) | |
| 116 max_default_memory_limit /= 8; | |
| 117 | |
| 118 #if defined(OS_LINUX) | |
| 119 base::FilePath shmem_dir; | |
| 120 if (base::GetShmemTempDir(false, &shmem_dir)) { | |
| 121 int64_t shmem_dir_amount_of_free_space = | |
| 122 base::SysInfo::AmountOfFreeDiskSpace(shmem_dir); | |
| 123 DCHECK_GT(shmem_dir_amount_of_free_space, 0); | |
| 124 int64_t shmem_dir_amount_of_free_space_mb = | |
| 125 shmem_dir_amount_of_free_space / kMegabyte; | |
| 126 | |
| 127 UMA_HISTOGRAM_CUSTOM_COUNTS("Memory.ShmemDir.AmountOfFreeSpace", | |
| 128 shmem_dir_amount_of_free_space_mb, 1, | |
| 129 4 * 1024, // 4 GB | |
| 130 50); | |
| 131 | |
| 132 if (shmem_dir_amount_of_free_space_mb < 64) { | |
| 133 LOG(WARNING) << "Less than 64MB of free space in temporary directory for " | |
| 134 "shared memory files: " | |
| 135 << shmem_dir_amount_of_free_space_mb; | |
| 136 } | |
| 137 | |
| 138 // Allow 1/2 of available shmem dir space to be used for discardable memory. | |
| 139 max_default_memory_limit = | |
| 140 std::min(max_default_memory_limit, shmem_dir_amount_of_free_space / 2); | |
| 141 } | |
| 142 #endif | |
| 143 | |
| 144 // Allow 25% of physical memory to be used for discardable memory. | |
| 145 return std::min(max_default_memory_limit, | |
| 146 base::SysInfo::AmountOfPhysicalMemory() / 4); | |
| 147 } | |
| 148 | |
// Process-wide singleton, handed out by
// HostDiscardableSharedMemoryManager::current().
base::LazyInstance<HostDiscardableSharedMemoryManager>
    g_discardable_shared_memory_manager = LAZY_INSTANCE_INITIALIZER;

// Delay before a scheduled EnforceMemoryPolicy() task runs.
const int kEnforceMemoryPolicyDelayMs = 1000;

// Global atomic to generate unique discardable shared memory IDs.
base::StaticAtomicSequenceNumber g_next_discardable_shared_memory_id;
| 156 | |
| 157 } // namespace | |
| 158 | |
// Takes ownership of |memory|. The underlying segment is unmapped/closed via
// ReleaseMemory() or when the last reference to this object goes away.
HostDiscardableSharedMemoryManager::MemorySegment::MemorySegment(
    std::unique_ptr<base::DiscardableSharedMemory> memory)
    : memory_(std::move(memory)) {}

HostDiscardableSharedMemoryManager::MemorySegment::~MemorySegment() {
}
| 165 | |
// Sets the budget from GetDefaultMemoryLimit(), subscribes to memory
// pressure / memory coordinator notifications, and registers as a
// memory-infra dump provider.
HostDiscardableSharedMemoryManager::HostDiscardableSharedMemoryManager()
    : default_memory_limit_(GetDefaultMemoryLimit()),
      memory_limit_(default_memory_limit_),
      bytes_allocated_(0),
      memory_pressure_listener_(new base::MemoryPressureListener(
          base::Bind(&HostDiscardableSharedMemoryManager::OnMemoryPressure,
                     base::Unretained(this)))),
      // Current thread might not have a task runner in tests.
      enforce_memory_policy_task_runner_(base::ThreadTaskRunnerHandle::Get()),
      enforce_memory_policy_pending_(false),
      weak_ptr_factory_(this) {
  DCHECK_NE(memory_limit_, 0u);
  // Cache the callback so ScheduleEnforceMemoryPolicy() can repost it without
  // rebuilding the bind state each time.
  enforce_memory_policy_callback_ =
      base::Bind(&HostDiscardableSharedMemoryManager::EnforceMemoryPolicy,
                 weak_ptr_factory_.GetWeakPtr());
  base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
      this, "HostDiscardableSharedMemoryManager",
      base::ThreadTaskRunnerHandle::Get());
  base::MemoryCoordinatorClientRegistry::GetInstance()->Register(this);
}
| 186 | |
HostDiscardableSharedMemoryManager::~HostDiscardableSharedMemoryManager() {
  // Stop receiving OnMemoryDump() callbacks.
  base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
      this);
}
| 191 | |
// Returns the lazily-created process-wide singleton instance.
HostDiscardableSharedMemoryManager*
HostDiscardableSharedMemoryManager::current() {
  return g_discardable_shared_memory_manager.Pointer();
}
| 196 | |
| 197 std::unique_ptr<base::DiscardableMemory> | |
| 198 HostDiscardableSharedMemoryManager::AllocateLockedDiscardableMemory( | |
| 199 size_t size) { | |
| 200 DCHECK_NE(size, 0u); | |
| 201 | |
| 202 DiscardableSharedMemoryId new_id = | |
| 203 g_next_discardable_shared_memory_id.GetNext(); | |
| 204 base::ProcessHandle current_process_handle = base::GetCurrentProcessHandle(); | |
| 205 | |
| 206 // Note: Use DiscardableSharedMemoryHeap for in-process allocation | |
| 207 // of discardable memory if the cost of each allocation is too high. | |
| 208 base::SharedMemoryHandle handle; | |
| 209 AllocateLockedDiscardableSharedMemory(current_process_handle, | |
| 210 ChildProcessHost::kInvalidUniqueID, | |
| 211 size, new_id, &handle); | |
| 212 std::unique_ptr<base::DiscardableSharedMemory> memory( | |
| 213 new base::DiscardableSharedMemory(handle)); | |
| 214 if (!memory->Map(size)) | |
| 215 base::TerminateBecauseOutOfMemory(size); | |
| 216 // Close file descriptor to avoid running out. | |
| 217 memory->Close(); | |
| 218 return base::MakeUnique<DiscardableMemoryImpl>( | |
| 219 std::move(memory), | |
| 220 base::Bind( | |
| 221 &HostDiscardableSharedMemoryManager::DeletedDiscardableSharedMemory, | |
| 222 base::Unretained(this), new_id, ChildProcessHost::kInvalidUniqueID)); | |
| 223 } | |
| 224 | |
// Memory-infra dump provider hook. BACKGROUND dumps emit only the grand
// total; other levels emit one allocator dump per live segment, attributed
// to the owning child process via a shared-GUID ownership edge.
bool HostDiscardableSharedMemoryManager::OnMemoryDump(
    const base::trace_event::MemoryDumpArgs& args,
    base::trace_event::ProcessMemoryDump* pmd) {
  if (args.level_of_detail ==
      base::trace_event::MemoryDumpLevelOfDetail::BACKGROUND) {
    base::trace_event::MemoryAllocatorDump* total_dump =
        pmd->CreateAllocatorDump("discardable");
    total_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
                          base::trace_event::MemoryAllocatorDump::kUnitsBytes,
                          GetBytesAllocated());
    return true;
  }

  base::AutoLock lock(lock_);
  for (const auto& process_entry : processes_) {
    const int child_process_id = process_entry.first;
    const MemorySegmentMap& process_segments = process_entry.second;
    for (const auto& segment_entry : process_segments) {
      const int segment_id = segment_entry.first;
      const MemorySegment* segment = segment_entry.second.get();
      // Skip segments that have already been released (unmapped).
      if (!segment->memory()->mapped_size())
        continue;

      // The "size" will be inherited from the shared global dump.
      std::string dump_name = base::StringPrintf(
          "discardable/process_%x/segment_%d", child_process_id, segment_id);
      base::trace_event::MemoryAllocatorDump* dump =
          pmd->CreateAllocatorDump(dump_name);

      dump->AddScalar("virtual_size",
                      base::trace_event::MemoryAllocatorDump::kUnitsBytes,
                      segment->memory()->mapped_size());

      // Host can only tell if whole segment is locked or not.
      dump->AddScalar(
          "locked_size", base::trace_event::MemoryAllocatorDump::kUnitsBytes,
          segment->memory()->IsMemoryLocked() ? segment->memory()->mapped_size()
                                              : 0u);

      // Create the cross-process ownership edge. If the child creates a
      // corresponding dump for the same segment, this will avoid to
      // double-count them in tracing. If, instead, no other process will emit a
      // dump with the same guid, the segment will be accounted to the browser.
      const uint64_t child_tracing_process_id =
          ChildProcessHostImpl::ChildProcessUniqueIdToTracingProcessId(
              child_process_id);
      base::trace_event::MemoryAllocatorDumpGuid shared_segment_guid =
          DiscardableSharedMemoryHeap::GetSegmentGUIDForTracing(
              child_tracing_process_id, segment_id);
      pmd->CreateSharedGlobalAllocatorDump(shared_segment_guid);
      pmd->AddOwnershipEdge(dump->guid(), shared_segment_guid);

#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
      if (args.level_of_detail ==
          base::trace_event::MemoryDumpLevelOfDetail::DETAILED) {
        size_t resident_size =
            base::trace_event::ProcessMemoryDump::CountResidentBytes(
                segment->memory()->memory(), segment->memory()->mapped_size());

        // This is added to the global dump since it has to be attributed to
        // both the allocator dumps involved.
        pmd->GetSharedGlobalAllocatorDump(shared_segment_guid)
            ->AddScalar("resident_size",
                        base::trace_event::MemoryAllocatorDump::kUnitsBytes,
                        static_cast<uint64_t>(resident_size));
      }
#endif  // defined(COUNT_RESIDENT_BYTES_SUPPORTED)
    }
  }
  return true;
}
| 296 | |
// Public entry point: allocates a locked segment of |size| bytes on behalf of
// a child process and shares the resulting handle back to it. Thin forwarder
// to the private AllocateLockedDiscardableSharedMemory().
void HostDiscardableSharedMemoryManager::
    AllocateLockedDiscardableSharedMemoryForChild(
        base::ProcessHandle process_handle,
        int child_process_id,
        size_t size,
        DiscardableSharedMemoryId id,
        base::SharedMemoryHandle* shared_memory_handle) {
  AllocateLockedDiscardableSharedMemory(process_handle, child_process_id, size,
                                        id, shared_memory_handle);
}
| 307 | |
// Public entry point: a child process reports it deleted segment |id|. Thin
// forwarder to the private DeletedDiscardableSharedMemory().
void HostDiscardableSharedMemoryManager::ChildDeletedDiscardableSharedMemory(
    DiscardableSharedMemoryId id,
    int child_process_id) {
  DeletedDiscardableSharedMemory(id, child_process_id);
}
| 313 | |
| 314 void HostDiscardableSharedMemoryManager::ProcessRemoved(int child_process_id) { | |
| 315 base::AutoLock lock(lock_); | |
| 316 | |
| 317 ProcessMap::iterator process_it = processes_.find(child_process_id); | |
| 318 if (process_it == processes_.end()) | |
| 319 return; | |
| 320 | |
| 321 size_t bytes_allocated_before_releasing_memory = bytes_allocated_; | |
| 322 | |
| 323 for (auto& segment_it : process_it->second) | |
| 324 ReleaseMemory(segment_it.second->memory()); | |
| 325 | |
| 326 processes_.erase(process_it); | |
| 327 | |
| 328 if (bytes_allocated_ != bytes_allocated_before_releasing_memory) | |
| 329 BytesAllocatedChanged(bytes_allocated_); | |
| 330 } | |
| 331 | |
// Replaces the memory budget and immediately purges usage down to the new
// limit.
void HostDiscardableSharedMemoryManager::SetMemoryLimit(size_t limit) {
  base::AutoLock lock(lock_);

  memory_limit_ = limit;
  ReduceMemoryUsageUntilWithinMemoryLimit();
}
| 338 | |
// Delayed-task body posted by ScheduleEnforceMemoryPolicy().
void HostDiscardableSharedMemoryManager::EnforceMemoryPolicy() {
  base::AutoLock lock(lock_);

  // Clear the flag first so another enforcement pass can be scheduled if this
  // one cannot get usage under the limit.
  enforce_memory_policy_pending_ = false;
  ReduceMemoryUsageUntilWithinMemoryLimit();
}
| 345 | |
// Returns the current total of mapped discardable memory, in bytes.
size_t HostDiscardableSharedMemoryManager::GetBytesAllocated() {
  base::AutoLock lock(lock_);

  return bytes_allocated_;
}
| 351 | |
| 352 void HostDiscardableSharedMemoryManager::OnMemoryStateChange( | |
| 353 base::MemoryState state) { | |
| 354 switch (state) { | |
| 355 case base::MemoryState::NORMAL: | |
| 356 SetMemoryLimit(default_memory_limit_); | |
| 357 break; | |
| 358 case base::MemoryState::THROTTLED: | |
| 359 SetMemoryLimit(0); | |
| 360 break; | |
| 361 case base::MemoryState::SUSPENDED: | |
| 362 // Note that SUSPENDED never occurs in the main browser process so far. | |
| 363 // Fall through. | |
| 364 case base::MemoryState::UNKNOWN: | |
| 365 NOTREACHED(); | |
| 366 break; | |
| 367 } | |
| 368 } | |
| 369 | |
// Creates, maps and shares a new locked segment of |size| bytes to
// |process_handle|, registering it under (|client_process_id|, |id|). On any
// failure, *|shared_memory_handle| is set to NULLHandle() and nothing is
// registered.
void HostDiscardableSharedMemoryManager::AllocateLockedDiscardableSharedMemory(
    base::ProcessHandle process_handle,
    int client_process_id,
    size_t size,
    DiscardableSharedMemoryId id,
    base::SharedMemoryHandle* shared_memory_handle) {
  base::AutoLock lock(lock_);

  // Make sure |id| is not already in use.
  MemorySegmentMap& process_segments = processes_[client_process_id];
  if (process_segments.find(id) != process_segments.end()) {
    LOG(ERROR) << "Invalid discardable shared memory ID";
    *shared_memory_handle = base::SharedMemory::NULLHandle();
    return;
  }

  // Memory usage must be reduced to prevent the addition of |size| from
  // taking usage above the limit. Usage should be reduced to 0 in cases
  // where |size| is greater than the limit.
  size_t limit = 0;
  // Note: the actual mapped size can be larger than requested and cause
  // |bytes_allocated_| to temporarily be larger than |memory_limit_|. The
  // error is minimized by incrementing |bytes_allocated_| with the actual
  // mapped size rather than |size| below.
  if (size < memory_limit_)
    limit = memory_limit_ - size;

  if (bytes_allocated_ > limit)
    ReduceMemoryUsageUntilWithinLimit(limit);

  std::unique_ptr<base::DiscardableSharedMemory> memory(
      new base::DiscardableSharedMemory);
  if (!memory->CreateAndMap(size)) {
    *shared_memory_handle = base::SharedMemory::NULLHandle();
    return;
  }

  if (!memory->ShareToProcess(process_handle, shared_memory_handle)) {
    LOG(ERROR) << "Cannot share discardable memory segment";
    *shared_memory_handle = base::SharedMemory::NULLHandle();
    return;
  }

  // Close file descriptor to avoid running out.
  memory->Close();

  // Guard against |bytes_allocated_| overflowing size_t.
  base::CheckedNumeric<size_t> checked_bytes_allocated = bytes_allocated_;
  checked_bytes_allocated += memory->mapped_size();
  if (!checked_bytes_allocated.IsValid()) {
    *shared_memory_handle = base::SharedMemory::NULLHandle();
    return;
  }

  bytes_allocated_ = checked_bytes_allocated.ValueOrDie();
  BytesAllocatedChanged(bytes_allocated_);

  // Register the segment in the per-process map and in the eviction heap,
  // which CompareMemoryUsageTime keeps ordered by last usage time.
  scoped_refptr<MemorySegment> segment(new MemorySegment(std::move(memory)));
  process_segments[id] = segment.get();
  segments_.push_back(segment.get());
  std::push_heap(segments_.begin(), segments_.end(), CompareMemoryUsageTime);

  if (bytes_allocated_ > memory_limit_)
    ScheduleEnforceMemoryPolicy();
}
| 434 | |
| 435 void HostDiscardableSharedMemoryManager::DeletedDiscardableSharedMemory( | |
| 436 DiscardableSharedMemoryId id, | |
| 437 int client_process_id) { | |
| 438 base::AutoLock lock(lock_); | |
| 439 | |
| 440 MemorySegmentMap& process_segments = processes_[client_process_id]; | |
| 441 | |
| 442 MemorySegmentMap::iterator segment_it = process_segments.find(id); | |
| 443 if (segment_it == process_segments.end()) { | |
| 444 LOG(ERROR) << "Invalid discardable shared memory ID"; | |
| 445 return; | |
| 446 } | |
| 447 | |
| 448 size_t bytes_allocated_before_releasing_memory = bytes_allocated_; | |
| 449 | |
| 450 ReleaseMemory(segment_it->second->memory()); | |
| 451 | |
| 452 process_segments.erase(segment_it); | |
| 453 | |
| 454 if (bytes_allocated_ != bytes_allocated_before_releasing_memory) | |
| 455 BytesAllocatedChanged(bytes_allocated_); | |
| 456 } | |
| 457 | |
| 458 void HostDiscardableSharedMemoryManager::OnMemoryPressure( | |
| 459 base::MemoryPressureListener::MemoryPressureLevel memory_pressure_level) { | |
| 460 base::AutoLock lock(lock_); | |
| 461 | |
| 462 switch (memory_pressure_level) { | |
| 463 case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE: | |
| 464 break; | |
| 465 case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE: | |
| 466 // Purge memory until usage is within half of |memory_limit_|. | |
| 467 ReduceMemoryUsageUntilWithinLimit(memory_limit_ / 2); | |
| 468 break; | |
| 469 case base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL: | |
| 470 // Purge everything possible when pressure is critical. | |
| 471 ReduceMemoryUsageUntilWithinLimit(0); | |
| 472 break; | |
| 473 } | |
| 474 } | |
| 475 | |
// Purges down to |memory_limit_| and, if locked segments kept usage above the
// limit, schedules a delayed retry. Caller must hold |lock_|.
void
HostDiscardableSharedMemoryManager::ReduceMemoryUsageUntilWithinMemoryLimit() {
  lock_.AssertAcquired();

  if (bytes_allocated_ <= memory_limit_)
    return;

  ReduceMemoryUsageUntilWithinLimit(memory_limit_);
  // Whatever could not be purged now may become purgeable later; retry.
  if (bytes_allocated_ > memory_limit_)
    ScheduleEnforceMemoryPolicy();
}
| 487 | |
// Evicts least-recently-used segments until |bytes_allocated_| is at or below
// |limit|, or no further segments are evictable. Caller must hold |lock_|.
void HostDiscardableSharedMemoryManager::ReduceMemoryUsageUntilWithinLimit(
    size_t limit) {
  TRACE_EVENT1("renderer_host",
               "HostDiscardableSharedMemoryManager::"
               "ReduceMemoryUsageUntilWithinLimit",
               "bytes_allocated",
               bytes_allocated_);

  // Usage time of currently locked segments are updated to this time and
  // we stop eviction attempts as soon as we come across a segment that we've
  // previously tried to evict but was locked.
  base::Time current_time = Now();

  lock_.AssertAcquired();
  size_t bytes_allocated_before_purging = bytes_allocated_;
  while (!segments_.empty()) {
    if (bytes_allocated_ <= limit)
      break;

    // Stop eviction attempts when the LRU segment is currently in use.
    if (segments_.front()->memory()->last_known_usage() >= current_time)
      break;

    // Pop the least-recently-used segment off the heap.
    std::pop_heap(segments_.begin(), segments_.end(), CompareMemoryUsageTime);
    scoped_refptr<MemorySegment> segment = segments_.back();
    segments_.pop_back();

    // Simply drop the reference and continue if memory has already been
    // unmapped. This happens when a memory segment has been deleted by
    // the client.
    if (!segment->memory()->mapped_size())
      continue;

    // Attempt to purge LRU segment. When successful, released the memory.
    if (segment->memory()->Purge(current_time)) {
      ReleaseMemory(segment->memory());
      continue;
    }

    // Add memory segment (with updated usage timestamp) back on heap after
    // failed attempt to purge it.
    segments_.push_back(segment.get());
    std::push_heap(segments_.begin(), segments_.end(), CompareMemoryUsageTime);
  }

  // Only refresh the reported total when usage actually changed.
  if (bytes_allocated_ != bytes_allocated_before_purging)
    BytesAllocatedChanged(bytes_allocated_);
}
| 536 | |
// Subtracts |memory|'s mapped size from the running total and unmaps/closes
// the segment. Caller must hold |lock_|.
void HostDiscardableSharedMemoryManager::ReleaseMemory(
    base::DiscardableSharedMemory* memory) {
  lock_.AssertAcquired();

  size_t size = memory->mapped_size();
  DCHECK_GE(bytes_allocated_, size);
  bytes_allocated_ -= size;

  // This will unmap the memory segment and drop our reference. The result
  // is that the memory will be released to the OS if the child process is
  // no longer referencing it.
  // Note: We intentionally leave the segment in the |segments| vector to
  // avoid reconstructing the heap. The element will be removed from the heap
  // when its last usage time is older than all other segments.
  memory->Unmap();
  memory->Close();
}
| 554 | |
| 555 void HostDiscardableSharedMemoryManager::BytesAllocatedChanged( | |
| 556 size_t new_bytes_allocated) const { | |
| 557 static const char kTotalDiscardableMemoryAllocatedKey[] = | |
| 558 "total-discardable-memory-allocated"; | |
| 559 base::debug::SetCrashKeyValue(kTotalDiscardableMemoryAllocatedKey, | |
| 560 base::Uint64ToString(new_bytes_allocated)); | |
| 561 } | |
| 562 | |
// Returns the current wall-clock time. Kept as a separate method, presumably
// so tests can substitute a fake time source -- confirm against the header.
base::Time HostDiscardableSharedMemoryManager::Now() const {
  return base::Time::Now();
}
| 566 | |
// Posts a delayed EnforceMemoryPolicy() task, collapsing duplicate requests
// while one is already pending. Caller must hold |lock_|.
void HostDiscardableSharedMemoryManager::ScheduleEnforceMemoryPolicy() {
  lock_.AssertAcquired();

  if (enforce_memory_policy_pending_)
    return;

  enforce_memory_policy_pending_ = true;
  DCHECK(enforce_memory_policy_task_runner_);
  enforce_memory_policy_task_runner_->PostDelayedTask(
      FROM_HERE, enforce_memory_policy_callback_,
      base::TimeDelta::FromMilliseconds(kEnforceMemoryPolicyDelayMs));
}
| 579 | |
| 580 } // namespace content | |
| OLD | NEW |