Index: trunk/src/content/common/gpu/gpu_memory_manager.cc |
=================================================================== |
--- trunk/src/content/common/gpu/gpu_memory_manager.cc (revision 274878) |
+++ trunk/src/content/common/gpu/gpu_memory_manager.cc (working copy) |
@@ -35,22 +35,72 @@ |
*total_size += (new_size - old_size); |
} |
+template<typename T> |
+T RoundUp(T n, T mul) { |
+ return ((n + mul - 1) / mul) * mul; |
} |
+template<typename T> |
+T RoundDown(T n, T mul) { |
+ return (n / mul) * mul; |
+} |
+ |
+} |
+ |
GpuMemoryManager::GpuMemoryManager( |
GpuChannelManager* channel_manager, |
uint64 max_surfaces_with_frontbuffer_soft_limit) |
: channel_manager_(channel_manager), |
manage_immediate_scheduled_(false), |
- disable_schedule_manage_(false), |
max_surfaces_with_frontbuffer_soft_limit_( |
max_surfaces_with_frontbuffer_soft_limit), |
- client_hard_limit_bytes_(0), |
+ priority_cutoff_(MemoryAllocation::CUTOFF_ALLOW_EVERYTHING), |
+ bytes_available_gpu_memory_(0), |
+ bytes_available_gpu_memory_overridden_(false), |
+ bytes_minimum_per_client_(0), |
+ bytes_default_per_client_(0), |
bytes_allocated_managed_current_(0), |
bytes_allocated_unmanaged_current_(0), |
- bytes_allocated_historical_max_(0) |
-{ } |
+ bytes_allocated_historical_max_(0), |
+ bytes_allocated_unmanaged_high_(0), |
+ bytes_allocated_unmanaged_low_(0), |
+ bytes_unmanaged_limit_step_(kBytesAllocatedUnmanagedStep), |
+ disable_schedule_manage_(false) |
+{ |
+ CommandLine* command_line = CommandLine::ForCurrentProcess(); |
+ // Use a more conservative memory allocation policy on Linux and Mac because |
+ // the platform is unstable when under memory pressure. |
+ // http://crbug.com/145600 (Linux) |
+ // http://crbug.com/141377 (Mac) |
+#if defined(OS_MACOSX) || (defined(OS_LINUX) && !defined(OS_CHROMEOS)) |
+ priority_cutoff_ = MemoryAllocation::CUTOFF_ALLOW_NICE_TO_HAVE; |
+#endif |
+ |
+#if defined(OS_ANDROID) |
+ bytes_default_per_client_ = 8 * 1024 * 1024; |
+ bytes_minimum_per_client_ = 8 * 1024 * 1024; |
+#elif defined(OS_CHROMEOS) |
+ bytes_default_per_client_ = 64 * 1024 * 1024; |
+ bytes_minimum_per_client_ = 4 * 1024 * 1024; |
+#elif defined(OS_MACOSX) |
+ bytes_default_per_client_ = 128 * 1024 * 1024; |
+ bytes_minimum_per_client_ = 128 * 1024 * 1024; |
+#else |
+ bytes_default_per_client_ = 64 * 1024 * 1024; |
+ bytes_minimum_per_client_ = 64 * 1024 * 1024; |
+#endif |
+ |
+ if (command_line->HasSwitch(switches::kForceGpuMemAvailableMb)) { |
+ base::StringToUint64( |
+ command_line->GetSwitchValueASCII(switches::kForceGpuMemAvailableMb), |
+ &bytes_available_gpu_memory_); |
+ bytes_available_gpu_memory_ *= 1024 * 1024; |
+ bytes_available_gpu_memory_overridden_ = true; |
+ } else |
+ bytes_available_gpu_memory_ = GetDefaultAvailableGpuMemory(); |
+} |
+ |
GpuMemoryManager::~GpuMemoryManager() { |
DCHECK(tracking_groups_.empty()); |
DCHECK(clients_visible_mru_.empty()); |
@@ -60,19 +110,60 @@ |
DCHECK(!bytes_allocated_unmanaged_current_); |
} |
+uint64 GpuMemoryManager::GetAvailableGpuMemory() const { |
+ // Allow unmanaged allocations to over-subscribe by at most (high_ - low_) |
+ // before restricting managed (compositor) memory based on unmanaged usage. |
+ if (bytes_allocated_unmanaged_low_ > bytes_available_gpu_memory_) |
+ return 0; |
+ return bytes_available_gpu_memory_ - bytes_allocated_unmanaged_low_; |
+} |
+ |
+uint64 GpuMemoryManager::GetDefaultAvailableGpuMemory() const { |
+#if defined(OS_ANDROID) |
+ return 16 * 1024 * 1024; |
+#elif defined(OS_CHROMEOS) |
+ return 1024 * 1024 * 1024; |
+#else |
+ return 256 * 1024 * 1024; |
+#endif |
+} |
+ |
+uint64 GpuMemoryManager::GetMaximumTotalGpuMemory() const { |
+#if defined(OS_ANDROID) |
+ return 256 * 1024 * 1024; |
+#else |
+ return 1024 * 1024 * 1024; |
+#endif |
+} |
+ |
+uint64 GpuMemoryManager::GetMaximumClientAllocation() const { |
+#if defined(OS_ANDROID) || defined(OS_CHROMEOS) |
+ return bytes_available_gpu_memory_; |
+#else |
+  // This is to avoid allowing a single page to use a full 256MB of memory |
+  // (the current total limit). Long-scroll pages will hit this limit, |
+  // resulting in instability on some platforms (e.g., issue 141377). |
+ return bytes_available_gpu_memory_ / 2; |
+#endif |
+} |
+ |
+uint64 GpuMemoryManager::CalcAvailableFromGpuTotal(uint64 total_gpu_memory) { |
+#if defined(OS_ANDROID) |
+ // We don't need to reduce the total on Android, since |
+ // the total is an estimate to begin with. |
+ return total_gpu_memory; |
+#else |
+ // Allow Chrome to use 75% of total GPU memory, or all-but-64MB of GPU |
+ // memory, whichever is less. |
+ return std::min(3 * total_gpu_memory / 4, total_gpu_memory - 64*1024*1024); |
+#endif |
+} |
+ |
void GpuMemoryManager::UpdateAvailableGpuMemory() { |
- // If the value was overridden on the command line, use the specified value. |
- static bool client_hard_limit_bytes_overridden = |
- CommandLine::ForCurrentProcess()->HasSwitch( |
- switches::kForceGpuMemAvailableMb); |
- if (client_hard_limit_bytes_overridden) { |
- base::StringToUint64( |
- CommandLine::ForCurrentProcess()->GetSwitchValueASCII( |
- switches::kForceGpuMemAvailableMb), |
- &client_hard_limit_bytes_); |
- client_hard_limit_bytes_ *= 1024 * 1024; |
+ // If the amount of video memory to use was specified at the command |
+ // line, never change it. |
+ if (bytes_available_gpu_memory_overridden_) |
return; |
- } |
// On non-Android, we use an operating system query when possible. |
// We do not have a reliable concept of multiple GPUs existing in |
@@ -100,21 +191,29 @@ |
if (!bytes_min) |
return; |
- client_hard_limit_bytes_ = bytes_min; |
+ bytes_available_gpu_memory_ = CalcAvailableFromGpuTotal(bytes_min); |
-#if defined(OS_ANDROID) |
- // Clamp the observed value to a specific range on Android. |
- client_hard_limit_bytes_ = std::max(client_hard_limit_bytes_, |
- static_cast<uint64>(16 * 1024 * 1024)); |
- client_hard_limit_bytes_ = std::min(client_hard_limit_bytes_, |
- static_cast<uint64>(256 * 1024 * 1024)); |
-#else |
- // Ignore what the system said and give all clients the same maximum |
- // allocation on desktop platforms. |
- client_hard_limit_bytes_ = 256 * 1024 * 1024; |
-#endif |
+ // Never go below the default allocation |
+ bytes_available_gpu_memory_ = std::max(bytes_available_gpu_memory_, |
+ GetDefaultAvailableGpuMemory()); |
+ |
+ // Never go above the maximum. |
+ bytes_available_gpu_memory_ = std::min(bytes_available_gpu_memory_, |
+ GetMaximumTotalGpuMemory()); |
} |
+void GpuMemoryManager::UpdateUnmanagedMemoryLimits() { |
+ // Set the limit to be [current_, current_ + step_ / 4), with the endpoints |
+ // of the intervals rounded down and up to the nearest step_, to avoid |
+ // thrashing the interval. |
+ bytes_allocated_unmanaged_high_ = RoundUp( |
+ bytes_allocated_unmanaged_current_ + bytes_unmanaged_limit_step_ / 4, |
+ bytes_unmanaged_limit_step_); |
+ bytes_allocated_unmanaged_low_ = RoundDown( |
+ bytes_allocated_unmanaged_current_, |
+ bytes_unmanaged_limit_step_); |
+} |
+ |
void GpuMemoryManager::ScheduleManage( |
ScheduleManageTime schedule_manage_time) { |
if (disable_schedule_manage_) |
@@ -164,8 +263,14 @@ |
GetCurrentUsage()); |
} |
- if (GetCurrentUsage() > bytes_allocated_historical_max_ + |
- kBytesAllocatedUnmanagedStep) { |
+  // If we've gone past our current limit on unmanaged memory, schedule a |
+  // re-manage to take into account the unmanaged memory. |
+ if (bytes_allocated_unmanaged_current_ >= bytes_allocated_unmanaged_high_) |
+ ScheduleManage(kScheduleManageNow); |
+ if (bytes_allocated_unmanaged_current_ < bytes_allocated_unmanaged_low_) |
+ ScheduleManage(kScheduleManageLater); |
+ |
+ if (GetCurrentUsage() > bytes_allocated_historical_max_) { |
bytes_allocated_historical_max_ = GetCurrentUsage(); |
// If we're blowing into new memory usage territory, spam the browser |
// process with the most up-to-date information about our memory usage. |
@@ -214,8 +319,27 @@ |
void GpuMemoryManager::SetClientStateManagedMemoryStats( |
GpuMemoryManagerClientState* client_state, |
- const ManagedMemoryStats& stats) { |
- // TODO(ccameron): delete this from the full stack. |
+ const ManagedMemoryStats& stats) |
+{ |
+ client_state->managed_memory_stats_ = stats; |
+ |
+ // If this is the first time that stats have been received for this |
+ // client, use them immediately. |
+ if (!client_state->managed_memory_stats_received_) { |
+ client_state->managed_memory_stats_received_ = true; |
+ ScheduleManage(kScheduleManageNow); |
+ return; |
+ } |
+ |
+ // If these statistics sit outside of the range that we used in our |
+ // computation of memory allocations then recompute the allocations. |
+ if (client_state->managed_memory_stats_.bytes_nice_to_have > |
+ client_state->bytes_nicetohave_limit_high_) { |
+ ScheduleManage(kScheduleManageNow); |
+ } else if (client_state->managed_memory_stats_.bytes_nice_to_have < |
+ client_state->bytes_nicetohave_limit_low_) { |
+ ScheduleManage(kScheduleManageLater); |
+ } |
} |
uint64 GpuMemoryManager::GetClientMemoryUsage( |
@@ -271,6 +395,9 @@ |
// Update the amount of GPU memory available on the system. |
UpdateAvailableGpuMemory(); |
+ // Update the limit on unmanaged memory. |
+ UpdateUnmanagedMemoryLimits(); |
+ |
// Determine which clients are "hibernated" (which determines the |
// distribution of frontbuffers and memory among clients that don't have |
// surfaces). |
@@ -285,7 +412,243 @@ |
SendUmaStatsToBrowser(); |
} |
+// static |
+uint64 GpuMemoryManager::ComputeCap( |
+ std::vector<uint64> bytes, uint64 bytes_sum_limit) |
+{ |
+ size_t bytes_size = bytes.size(); |
+ uint64 bytes_sum = 0; |
+ |
+ if (bytes_size == 0) |
+ return std::numeric_limits<uint64>::max(); |
+ |
+ // Sort and add up all entries |
+ std::sort(bytes.begin(), bytes.end()); |
+ for (size_t i = 0; i < bytes_size; ++i) |
+ bytes_sum += bytes[i]; |
+ |
+ // As we go through the below loop, let bytes_partial_sum be the |
+ // sum of bytes[0] + ... + bytes[bytes_size - i - 1] |
+ uint64 bytes_partial_sum = bytes_sum; |
+ |
+ // Try using each entry as a cap, and see where we get cut off. |
+ for (size_t i = 0; i < bytes_size; ++i) { |
+ // Try limiting cap to bytes[bytes_size - i - 1] |
+ uint64 test_cap = bytes[bytes_size - i - 1]; |
+ uint64 bytes_sum_with_test_cap = i * test_cap + bytes_partial_sum; |
+ |
+ // If that fits, raise test_cap to give an even distribution to the |
+ // last i entries. |
+ if (bytes_sum_with_test_cap <= bytes_sum_limit) { |
+ if (i == 0) |
+ return std::numeric_limits<uint64>::max(); |
+ else |
+ return test_cap + (bytes_sum_limit - bytes_sum_with_test_cap) / i; |
+ } else { |
+ bytes_partial_sum -= test_cap; |
+ } |
+ } |
+ |
+ // If we got here, then we can't fully accommodate any of the clients, |
+ // so distribute bytes_sum_limit evenly. |
+ return bytes_sum_limit / bytes_size; |
+} |
+ |
+uint64 GpuMemoryManager::ComputeClientAllocationWhenVisible( |
+ GpuMemoryManagerClientState* client_state, |
+ uint64 bytes_above_required_cap, |
+ uint64 bytes_above_minimum_cap, |
+ uint64 bytes_overall_cap) { |
+ ManagedMemoryStats* stats = &client_state->managed_memory_stats_; |
+ |
+ if (!client_state->managed_memory_stats_received_) |
+ return GetDefaultClientAllocation(); |
+ |
+ uint64 bytes_required = 9 * stats->bytes_required / 8; |
+ bytes_required = std::min(bytes_required, GetMaximumClientAllocation()); |
+ bytes_required = std::max(bytes_required, GetMinimumClientAllocation()); |
+ |
+ uint64 bytes_nicetohave = 4 * stats->bytes_nice_to_have / 3; |
+ bytes_nicetohave = std::min(bytes_nicetohave, GetMaximumClientAllocation()); |
+ bytes_nicetohave = std::max(bytes_nicetohave, GetMinimumClientAllocation()); |
+ bytes_nicetohave = std::max(bytes_nicetohave, bytes_required); |
+ |
+ uint64 allocation = GetMinimumClientAllocation(); |
+ allocation += std::min(bytes_required - GetMinimumClientAllocation(), |
+ bytes_above_minimum_cap); |
+ allocation += std::min(bytes_nicetohave - bytes_required, |
+ bytes_above_required_cap); |
+ allocation = std::min(allocation, |
+ bytes_overall_cap); |
+ return allocation; |
+} |
+ |
+void GpuMemoryManager::ComputeVisibleSurfacesAllocations() { |
+ uint64 bytes_available_total = GetAvailableGpuMemory(); |
+ uint64 bytes_above_required_cap = std::numeric_limits<uint64>::max(); |
+ uint64 bytes_above_minimum_cap = std::numeric_limits<uint64>::max(); |
+ uint64 bytes_overall_cap_visible = GetMaximumClientAllocation(); |
+ |
+ // Compute memory usage at three levels |
+ // - painting everything that is nicetohave for visible clients |
+  // - painting only what is visible |
+ // - giving every client the minimum allocation |
+ uint64 bytes_nicetohave_visible = 0; |
+ uint64 bytes_required_visible = 0; |
+ uint64 bytes_minimum_visible = 0; |
+ for (ClientStateList::const_iterator it = clients_visible_mru_.begin(); |
+ it != clients_visible_mru_.end(); |
+ ++it) { |
+ GpuMemoryManagerClientState* client_state = *it; |
+ client_state->bytes_allocation_ideal_nicetohave_ = |
+ ComputeClientAllocationWhenVisible( |
+ client_state, |
+ bytes_above_required_cap, |
+ bytes_above_minimum_cap, |
+ bytes_overall_cap_visible); |
+ client_state->bytes_allocation_ideal_required_ = |
+ ComputeClientAllocationWhenVisible( |
+ client_state, |
+ 0, |
+ bytes_above_minimum_cap, |
+ bytes_overall_cap_visible); |
+ client_state->bytes_allocation_ideal_minimum_ = |
+ ComputeClientAllocationWhenVisible( |
+ client_state, |
+ 0, |
+ 0, |
+ bytes_overall_cap_visible); |
+ |
+ bytes_nicetohave_visible += |
+ client_state->bytes_allocation_ideal_nicetohave_; |
+ bytes_required_visible += |
+ client_state->bytes_allocation_ideal_required_; |
+ bytes_minimum_visible += |
+ client_state->bytes_allocation_ideal_minimum_; |
+ } |
+ |
+ // Determine which of those three points we can satisfy, and limit |
+ // bytes_above_required_cap and bytes_above_minimum_cap to not go |
+ // over the limit. |
+ if (bytes_minimum_visible > bytes_available_total) { |
+ bytes_above_required_cap = 0; |
+ bytes_above_minimum_cap = 0; |
+ } else if (bytes_required_visible > bytes_available_total) { |
+ std::vector<uint64> bytes_to_fit; |
+ for (ClientStateList::const_iterator it = clients_visible_mru_.begin(); |
+ it != clients_visible_mru_.end(); |
+ ++it) { |
+ GpuMemoryManagerClientState* client_state = *it; |
+ bytes_to_fit.push_back(client_state->bytes_allocation_ideal_required_ - |
+ client_state->bytes_allocation_ideal_minimum_); |
+ } |
+ bytes_above_required_cap = 0; |
+ bytes_above_minimum_cap = ComputeCap( |
+ bytes_to_fit, bytes_available_total - bytes_minimum_visible); |
+ } else if (bytes_nicetohave_visible > bytes_available_total) { |
+ std::vector<uint64> bytes_to_fit; |
+ for (ClientStateList::const_iterator it = clients_visible_mru_.begin(); |
+ it != clients_visible_mru_.end(); |
+ ++it) { |
+ GpuMemoryManagerClientState* client_state = *it; |
+ bytes_to_fit.push_back(client_state->bytes_allocation_ideal_nicetohave_ - |
+ client_state->bytes_allocation_ideal_required_); |
+ } |
+ bytes_above_required_cap = ComputeCap( |
+ bytes_to_fit, bytes_available_total - bytes_required_visible); |
+ bytes_above_minimum_cap = std::numeric_limits<uint64>::max(); |
+ } |
+ |
+ // Given those computed limits, set the actual memory allocations for the |
+ // visible clients, tracking the largest allocation and the total allocation |
+ // for future use. |
+ uint64 bytes_allocated_visible = 0; |
+ uint64 bytes_allocated_max_client_allocation = 0; |
+ for (ClientStateList::const_iterator it = clients_visible_mru_.begin(); |
+ it != clients_visible_mru_.end(); |
+ ++it) { |
+ GpuMemoryManagerClientState* client_state = *it; |
+ client_state->bytes_allocation_when_visible_ = |
+ ComputeClientAllocationWhenVisible( |
+ client_state, |
+ bytes_above_required_cap, |
+ bytes_above_minimum_cap, |
+ bytes_overall_cap_visible); |
+ bytes_allocated_visible += client_state->bytes_allocation_when_visible_; |
+ bytes_allocated_max_client_allocation = std::max( |
+ bytes_allocated_max_client_allocation, |
+ client_state->bytes_allocation_when_visible_); |
+ } |
+ |
+ // Set the limit for nonvisible clients for when they become visible. |
+ // Use the same formula, with a lowered overall cap in case any of the |
+ // currently-nonvisible clients are much more resource-intensive than any |
+ // of the existing clients. |
+ uint64 bytes_overall_cap_nonvisible = bytes_allocated_max_client_allocation; |
+ if (bytes_available_total > bytes_allocated_visible) { |
+ bytes_overall_cap_nonvisible += |
+ bytes_available_total - bytes_allocated_visible; |
+ } |
+ bytes_overall_cap_nonvisible = std::min(bytes_overall_cap_nonvisible, |
+ GetMaximumClientAllocation()); |
+ for (ClientStateList::const_iterator it = clients_nonvisible_mru_.begin(); |
+ it != clients_nonvisible_mru_.end(); |
+ ++it) { |
+ GpuMemoryManagerClientState* client_state = *it; |
+ client_state->bytes_allocation_when_visible_ = |
+ ComputeClientAllocationWhenVisible( |
+ client_state, |
+ bytes_above_required_cap, |
+ bytes_above_minimum_cap, |
+ bytes_overall_cap_nonvisible); |
+ } |
+} |
+ |
+void GpuMemoryManager::DistributeRemainingMemoryToVisibleSurfaces() { |
+ uint64 bytes_available_total = GetAvailableGpuMemory(); |
+ uint64 bytes_allocated_total = 0; |
+ |
+ for (ClientStateList::const_iterator it = clients_visible_mru_.begin(); |
+ it != clients_visible_mru_.end(); |
+ ++it) { |
+ GpuMemoryManagerClientState* client_state = *it; |
+ bytes_allocated_total += client_state->bytes_allocation_when_visible_; |
+ } |
+ |
+ if (bytes_allocated_total >= bytes_available_total) |
+ return; |
+ |
+ std::vector<uint64> bytes_extra_requests; |
+ for (ClientStateList::const_iterator it = clients_visible_mru_.begin(); |
+ it != clients_visible_mru_.end(); |
+ ++it) { |
+ GpuMemoryManagerClientState* client_state = *it; |
+ CHECK(GetMaximumClientAllocation() >= |
+ client_state->bytes_allocation_when_visible_); |
+ uint64 bytes_extra = GetMaximumClientAllocation() - |
+ client_state->bytes_allocation_when_visible_; |
+ bytes_extra_requests.push_back(bytes_extra); |
+ } |
+ uint64 bytes_extra_cap = ComputeCap( |
+ bytes_extra_requests, bytes_available_total - bytes_allocated_total); |
+ for (ClientStateList::const_iterator it = clients_visible_mru_.begin(); |
+ it != clients_visible_mru_.end(); |
+ ++it) { |
+ GpuMemoryManagerClientState* client_state = *it; |
+ uint64 bytes_extra = GetMaximumClientAllocation() - |
+ client_state->bytes_allocation_when_visible_; |
+ client_state->bytes_allocation_when_visible_ += std::min( |
+ bytes_extra, bytes_extra_cap); |
+ } |
+} |
+ |
void GpuMemoryManager::AssignSurfacesAllocations() { |
+  // Compute allocations for all clients. |
+ ComputeVisibleSurfacesAllocations(); |
+ |
+ // Distribute the remaining memory to visible clients. |
+ DistributeRemainingMemoryToVisibleSurfaces(); |
+ |
// Send that allocation to the clients. |
ClientStateList clients = clients_visible_mru_; |
clients.insert(clients.end(), |
@@ -296,24 +659,20 @@ |
++it) { |
GpuMemoryManagerClientState* client_state = *it; |
+ // Re-assign memory limits to this client when its "nice to have" bucket |
+ // grows or shrinks by 1/4. |
+ client_state->bytes_nicetohave_limit_high_ = |
+ 5 * client_state->managed_memory_stats_.bytes_nice_to_have / 4; |
+ client_state->bytes_nicetohave_limit_low_ = |
+ 3 * client_state->managed_memory_stats_.bytes_nice_to_have / 4; |
+ |
// Populate and send the allocation to the client |
MemoryAllocation allocation; |
- allocation.bytes_limit_when_visible = client_hard_limit_bytes_; |
-#if defined(OS_ANDROID) |
- // On Android, because there is only one visible tab at any time, allow |
- // that renderer to cache as much as it can. |
- allocation.priority_cutoff_when_visible = |
- MemoryAllocation::CUTOFF_ALLOW_EVERYTHING; |
-#else |
- // On desktop platforms, instruct the renderers to cache only a smaller |
- // set, to play nice with other renderers and other applications. If this |
- // if not done, then the system can become unstable. |
- // http://crbug.com/145600 (Linux) |
- // http://crbug.com/141377 (Mac) |
- allocation.priority_cutoff_when_visible = |
- MemoryAllocation::CUTOFF_ALLOW_NICE_TO_HAVE; |
-#endif |
+ allocation.bytes_limit_when_visible = |
+ client_state->bytes_allocation_when_visible_; |
+ allocation.priority_cutoff_when_visible = priority_cutoff_; |
+ |
client_state->client_->SetMemoryAllocation(allocation); |
client_state->client_->SuggestHaveFrontBuffer(!client_state->hibernated_); |
} |
@@ -327,7 +686,8 @@ |
MemoryAllocation allocation; |
if (!client_state->hibernated_) { |
- allocation.bytes_limit_when_visible = 1; |
+ allocation.bytes_limit_when_visible = |
+ GetMinimumClientAllocation(); |
allocation.priority_cutoff_when_visible = |
MemoryAllocation::CUTOFF_ALLOW_EVERYTHING; |
} |
@@ -384,7 +744,7 @@ |
GPUMemoryUmaStats params; |
params.bytes_allocated_current = GetCurrentUsage(); |
params.bytes_allocated_max = bytes_allocated_historical_max_; |
- params.bytes_limit = client_hard_limit_bytes_; |
+ params.bytes_limit = bytes_available_gpu_memory_; |
params.client_count = clients_visible_mru_.size() + |
clients_nonvisible_mru_.size() + |
clients_nonsurface_.size(); |