Chromium Code Reviews

Diff: content/common/gpu/gpu_memory_manager_unittest.cc

Issue 308743005: Lobotomize the GPU memory manager (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Update OWNER (created 6 years, 6 months ago)
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "content/common/gpu/gpu_memory_manager.h"
 #include "content/common/gpu/gpu_memory_manager_client.h"
 #include "content/common/gpu/gpu_memory_tracking.h"
 #include "gpu/command_buffer/common/gpu_memory_allocation.h"
 #include "ui/gfx/size_conversions.h"

(...skipping 172 matching lines...)
   bool IsAllocationBackgroundForSurfaceYes(
       const MemoryAllocation& alloc) {
     return true;
   }
   bool IsAllocationHibernatedForSurfaceYes(
       const MemoryAllocation& alloc) {
     return true;
   }
   bool IsAllocationForegroundForSurfaceNo(
       const MemoryAllocation& alloc) {
-    return alloc.bytes_limit_when_visible ==
-        GetMinimumClientAllocation();
+    return alloc.bytes_limit_when_visible == 1;
   }
   bool IsAllocationBackgroundForSurfaceNo(
       const MemoryAllocation& alloc) {
-    return alloc.bytes_limit_when_visible ==
-        GetMinimumClientAllocation();
+    return alloc.bytes_limit_when_visible == 1;
   }
   bool IsAllocationHibernatedForSurfaceNo(
       const MemoryAllocation& alloc) {
     return alloc.bytes_limit_when_visible == 0;
   }

   void Manage() {
     ClientAssignmentCollector::ClearAllStats();
     memmgr_.Manage();
   }

-  uint64 CalcAvailableFromGpuTotal(uint64 bytes) {
-    return GpuMemoryManager::CalcAvailableFromGpuTotal(bytes);
-  }
-
-  uint64 CalcAvailableClamped(uint64 bytes) {
-    bytes = std::max(bytes, memmgr_.GetDefaultAvailableGpuMemory());
-    bytes = std::min(bytes, memmgr_.GetMaximumTotalGpuMemory());
-    return bytes;
-  }
-
-  uint64 GetAvailableGpuMemory() {
-    return memmgr_.GetAvailableGpuMemory();
-  }
-
-  uint64 GetMaximumClientAllocation() {
-    return memmgr_.GetMaximumClientAllocation();
-  }
-
-  uint64 GetMinimumClientAllocation() {
-    return memmgr_.GetMinimumClientAllocation();
-  }
-
   void SetClientStats(
       FakeClient* client,
       uint64 required,
       uint64 nicetohave) {
     client->SetManagedMemoryStats(
         ManagedMemoryStats(required, nicetohave, 0, false));
   }

   GpuMemoryManager memmgr_;
 };
(...skipping 203 matching lines...)
   // and more important).
   stub_ignore_c.SetVisible(true);
   stub_ignore_c.SetVisible(false);
   Manage();
   EXPECT_TRUE(IsAllocationHibernatedForSurfaceYes(stub1.allocation_));
   EXPECT_TRUE(IsAllocationHibernatedForSurfaceYes(stub2.allocation_));
   EXPECT_TRUE(IsAllocationHibernatedForSurfaceNo(stub3.allocation_));
   EXPECT_TRUE(IsAllocationHibernatedForSurfaceNo(stub4.allocation_));
 }

-// Test GpuMemoryManager::UpdateAvailableGpuMemory functionality
-TEST_F(GpuMemoryManagerTest, TestUpdateAvailableGpuMemory) {
-  FakeClient stub1(&memmgr_, GenerateUniqueSurfaceId(), true),
-             stub2(&memmgr_, GenerateUniqueSurfaceId(), false),
-             stub3(&memmgr_, GenerateUniqueSurfaceId(), true),
-             stub4(&memmgr_, GenerateUniqueSurfaceId(), false);
-  // We take the lowest GPU's total memory as the limit
-  uint64 expected = 400 * 1024 * 1024;
-  stub1.SetTotalGpuMemory(expected); // GPU Memory
-  stub2.SetTotalGpuMemory(expected - 1024 * 1024); // Smaller but not visible.
-  stub3.SetTotalGpuMemory(expected + 1024 * 1024); // Visible but larger.
-  stub4.SetTotalGpuMemory(expected + 1024 * 1024); // Not visible and larger.
-  Manage();
-  uint64 bytes_expected = CalcAvailableFromGpuTotal(expected);
-  EXPECT_EQ(GetAvailableGpuMemory(), CalcAvailableClamped(bytes_expected));
-}
-
-// Test GpuMemoryManager Stub Memory Stats functionality:
-// Creates various surface/non-surface stubs and switches stub visibility and
-// tests to see that stats data structure values are correct.
-TEST_F(GpuMemoryManagerTest, StubMemoryStatsForLastManageTests) {
-  ClientAssignmentCollector::ClientMemoryStatMap stats;
-
-  Manage();
-  stats = ClientAssignmentCollector::GetClientStatsForLastManage();
-  EXPECT_EQ(stats.size(), 0ul);
-
-  FakeClient stub1(&memmgr_, GenerateUniqueSurfaceId(), true);
-  Manage();
-  stats = ClientAssignmentCollector::GetClientStatsForLastManage();
-  uint64 stub1allocation1 =
-      stats[&stub1].allocation.bytes_limit_when_visible;
-
-  EXPECT_EQ(stats.size(), 1ul);
-  EXPECT_GT(stub1allocation1, 0ul);
-
-  FakeClient stub2(&memmgr_, &stub1);
-  Manage();
-  stats = ClientAssignmentCollector::GetClientStatsForLastManage();
-  EXPECT_EQ(stats.count(&stub1), 1ul);
-  uint64 stub1allocation2 =
-      stats[&stub1].allocation.bytes_limit_when_visible;
-  EXPECT_EQ(stats.count(&stub2), 1ul);
-  uint64 stub2allocation2 =
-      stats[&stub2].allocation.bytes_limit_when_visible;
-
-  EXPECT_EQ(stats.size(), 2ul);
-  EXPECT_GT(stub1allocation2, 0ul);
-  EXPECT_GT(stub2allocation2, 0ul);
-  if (stub1allocation2 != GetMaximumClientAllocation())
-    EXPECT_LT(stub1allocation2, stub1allocation1);
-
-  FakeClient stub3(&memmgr_, GenerateUniqueSurfaceId(), true);
-  Manage();
-  stats = ClientAssignmentCollector::GetClientStatsForLastManage();
-  uint64 stub1allocation3 =
-      stats[&stub1].allocation.bytes_limit_when_visible;
-  uint64 stub2allocation3 =
-      stats[&stub2].allocation.bytes_limit_when_visible;
-  uint64 stub3allocation3 =
-      stats[&stub3].allocation.bytes_limit_when_visible;
-
-  EXPECT_EQ(stats.size(), 3ul);
-  EXPECT_GT(stub1allocation3, 0ul);
-  EXPECT_GT(stub2allocation3, 0ul);
-  EXPECT_GT(stub3allocation3, 0ul);
-  if (stub1allocation3 != GetMaximumClientAllocation())
-    EXPECT_LT(stub1allocation3, stub1allocation2);
-
-  stub1.SetVisible(false);
-
-  Manage();
-  stats = ClientAssignmentCollector::GetClientStatsForLastManage();
-  uint64 stub1allocation4 =
-      stats[&stub1].allocation.bytes_limit_when_visible;
-  uint64 stub2allocation4 =
-      stats[&stub2].allocation.bytes_limit_when_visible;
-  uint64 stub3allocation4 =
-      stats[&stub3].allocation.bytes_limit_when_visible;
-
-  EXPECT_EQ(stats.size(), 3ul);
-  EXPECT_GT(stub1allocation4, 0ul);
-  EXPECT_GE(stub2allocation4, 0ul);
-  EXPECT_GT(stub3allocation4, 0ul);
-  if (stub3allocation3 != GetMaximumClientAllocation())
-    EXPECT_GT(stub3allocation4, stub3allocation3);
-}
-
-// Test tracking of unmanaged (e.g., WebGL) memory.
-TEST_F(GpuMemoryManagerTest, UnmanagedTracking) {
-  // Set memory manager constants for this test.
-  memmgr_.TestingSetAvailableGpuMemory(64);
-  memmgr_.TestingSetMinimumClientAllocation(8);
-  memmgr_.TestingSetUnmanagedLimitStep(16);
-
-  FakeClient stub1(&memmgr_, GenerateUniqueSurfaceId(), true);
-
-  // Expect that the one stub gets its nicetohave level.
-  SetClientStats(&stub1, 16, 32);
-  Manage();
-  EXPECT_GE(stub1.BytesWhenVisible(), 32u);
-
-  // Now allocate some unmanaged memory and make sure the amount
-  // goes down.
-  memmgr_.TrackMemoryAllocatedChange(
-      stub1.tracking_group_.get(),
-      0,
-      48,
-      gpu::gles2::MemoryTracker::kUnmanaged);
-  Manage();
-  EXPECT_LT(stub1.BytesWhenVisible(), 24u);
-
-  // Now allocate the entire FB worth of unmanaged memory, and
-  // make sure that we stay stuck at the minimum tab allocation.
-  memmgr_.TrackMemoryAllocatedChange(
-      stub1.tracking_group_.get(),
-      48,
-      64,
-      gpu::gles2::MemoryTracker::kUnmanaged);
-  Manage();
-  EXPECT_EQ(stub1.BytesWhenVisible(), 8u);
-
-  // Far-oversubscribe the entire FB, and make sure we stay at
-  // the minimum allocation, and don't blow up.
-  memmgr_.TrackMemoryAllocatedChange(
-      stub1.tracking_group_.get(),
-      64,
-      999,
-      gpu::gles2::MemoryTracker::kUnmanaged);
-  Manage();
-  EXPECT_EQ(stub1.BytesWhenVisible(), 8u);
-
-  // Delete all tracked memory so we don't hit leak checks.
-  memmgr_.TrackMemoryAllocatedChange(
-      stub1.tracking_group_.get(),
-      999,
-      0,
-      gpu::gles2::MemoryTracker::kUnmanaged);
-}
-
-// Test that the default allocation levels are used.
-TEST_F(GpuMemoryManagerTest, DefaultAllocation) {
-  // Set memory manager constants for this test.
-  memmgr_.TestingSetAvailableGpuMemory(64);
-  memmgr_.TestingSetMinimumClientAllocation(8);
-  memmgr_.TestingSetDefaultClientAllocation(16);
-
-  FakeClient stub1(&memmgr_, GenerateUniqueSurfaceId(), true);
-
-  // Expect that a client which has not sent stats receives at
-  // least the default allocation.
-  Manage();
-  EXPECT_GE(stub1.BytesWhenVisible(),
-            memmgr_.GetDefaultClientAllocation());
-}
-
 } // namespace content