Chromium Code Reviews

Diff: src/heap/spaces.h

Issue 1929503002: Reland of "[heap] Uncommit pooled pages concurrently" (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Fixed pooling (created 4 years, 7 months ago)
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #ifndef V8_HEAP_SPACES_H_
 #define V8_HEAP_SPACES_H_

+#include <list>
+
 #include "src/allocation.h"
 #include "src/atomic-utils.h"
 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
 #include "src/base/platform/mutex.h"
 #include "src/flags.h"
 #include "src/hashmap.h"
 #include "src/list.h"
 #include "src/objects.h"
 #include "src/utils.h"
(...skipping 415 matching lines...)
     // candidates selection cycle.
     FORCE_EVACUATION_CANDIDATE_FOR_TESTING,

     // This flag is intended to be used for testing.
     NEVER_ALLOCATE_ON_PAGE,

     // The memory chunk is already logically freed, however the actual freeing
     // still has to be performed.
     PRE_FREED,

+    // |POOLED|: When actually freeing this chunk, only uncommit and do not
+    // give up the reservation as we still reuse the chunk at some point.
+    POOLED,
+
     // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
     // has been aborted and needs special handling by the sweeper.
     COMPACTION_WAS_ABORTED,

     // |ANCHOR|: Flag is set if page is an anchor.
     ANCHOR,

     // Last flag, keep at bottom.
     NUM_MEMORY_CHUNK_FLAGS
   };
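
The new POOLED enumerator is one more bit in a chunk's flag word, set alongside PRE_FREED when a page is logically freed but its reservation should survive for reuse. Below is a minimal, self-contained sketch of that bit-flag mechanism; ChunkFlags is a hypothetical stand-in, not MemoryChunk's real accessors:

    #include <cstdint>

    // Hypothetical stand-in for a chunk's flag word: one bit per enumerator.
    struct ChunkFlags {
      uintptr_t bits = 0;
      void Set(int flag) { bits |= uintptr_t{1} << flag; }
      void Clear(int flag) { bits &= ~(uintptr_t{1} << flag); }
      bool IsSet(int flag) const { return ((bits >> flag) & 1) != 0; }
    };

    // A chunk destined for the pool carries both bits until the concurrent
    // unmapper processes it, e.g. flags.Set(PRE_FREED); flags.Set(POOLED);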
(...skipping 805 matching lines...)
   STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);

   Address starts_[kSize];
 };


 // ----------------------------------------------------------------------------
 // A space acquires chunks of memory from the operating system. The memory
 // allocator allocates and deallocates pages for the paged heap spaces and
 // large pages for large object space.
-//
-// Each space has to manage it's own pages.
-//
 class MemoryAllocator {
  public:
+  // Unmapper takes care of concurrently unmapping and uncommitting memory
+  // chunks.
+  class Unmapper {
+   public:
+    class UnmapFreeMemoryTask;
+
+    explicit Unmapper(MemoryAllocator* allocator)
+        : allocator_(allocator),
+          pending_unmapping_tasks_semaphore_(0),
+          concurrent_unmapping_tasks_active_(0) {}
+
+    void AddMemoryChunkSafe(MemoryChunk* chunk) {
+      if ((chunk->size() == Page::kPageSize) &&
+          (chunk->executable() != EXECUTABLE)) {
+        AddMemoryChunkSafe<kRegular>(chunk);
+      } else {
+        AddMemoryChunkSafe<kNonRegular>(chunk);
+      }
+    }
+
+    MemoryChunk* TryGetPooledMemoryChunkSafe() {
+      // Procedure:
+      // (1) Try to get a chunk that was declared as pooled and already has
+      //     been uncommitted.
+      // (2) Try to steal any memory chunk of kPageSize that would've been
+      //     unmapped.
+      MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>();
+      if (chunk == nullptr) {
+        chunk = GetMemoryChunkSafe<kRegular>();
+        if (chunk != nullptr) {
+          // For stolen chunks we need to manually free any allocated memory.
+          chunk->ReleaseAllocatedMemory();
+        }
+      }
+      return chunk;
+    }
+
+    void FreeQueuedChunks();
+    bool WaitUntilCompleted();
+
+   private:
+    enum ChunkQueueType {
+      kRegular,     // Pages of kPageSize that do not live in a CodeRange and
+                    // can thus be used for stealing.
+      kNonRegular,  // Large chunks and executable chunks.
+      kPooled,      // Pooled chunks, already uncommitted and ready for reuse.
+      kNumberOfChunkQueues,
+    };
+
+    template <ChunkQueueType type>
+    void AddMemoryChunkSafe(MemoryChunk* chunk) {
+      base::LockGuard<base::Mutex> guard(&mutex_);
+      chunks_[type].push_back(chunk);
+    }
+
+    template <ChunkQueueType type>
+    MemoryChunk* GetMemoryChunkSafe() {
+      base::LockGuard<base::Mutex> guard(&mutex_);
+      if (chunks_[type].empty()) return nullptr;
+      MemoryChunk* chunk = chunks_[type].front();
+      chunks_[type].pop_front();
+      return chunk;
+    }
+
+    void PerformFreeMemoryOnQueuedChunks();
+
+    base::Mutex mutex_;
+    MemoryAllocator* allocator_;
+    std::list<MemoryChunk*> chunks_[kNumberOfChunkQueues];
+    base::Semaphore pending_unmapping_tasks_semaphore_;
+    intptr_t concurrent_unmapping_tasks_active_;
+
+    friend class MemoryAllocator;
+  };
+
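
The Unmapper above is the core of the change: chunks are filed into per-type queues under a mutex, and a pooled page can be recovered either directly or by stealing a regular page that was queued for unmapping but not yet processed. Here is a self-contained sketch of that queue discipline, with std::mutex standing in for base::Mutex and Chunk for MemoryChunk:

    #include <initializer_list>
    #include <list>
    #include <mutex>

    struct Chunk {};  // hypothetical stand-in for MemoryChunk

    class UnmapperSketch {
     public:
      enum QueueType { kRegular, kNonRegular, kPooled, kNumQueues };

      void Add(QueueType type, Chunk* chunk) {
        std::lock_guard<std::mutex> guard(mutex_);
        queues_[type].push_back(chunk);
      }

      // Mirrors TryGetPooledMemoryChunkSafe(): prefer an already-uncommitted
      // pooled chunk, otherwise steal a regular page from the unmap queue.
      // A stolen page's payload memory must be released by the caller.
      Chunk* TryGetPooled() {
        std::lock_guard<std::mutex> guard(mutex_);
        for (QueueType type : {kPooled, kRegular}) {
          if (!queues_[type].empty()) {
            Chunk* chunk = queues_[type].front();
            queues_[type].pop_front();
            return chunk;
          }
        }
        return nullptr;
      }

     private:
      std::mutex mutex_;
      std::list<Chunk*> queues_[kNumQueues];
    };

The real class instead makes the queue type a template parameter of AddMemoryChunkSafe/GetMemoryChunkSafe, so the queue index is fixed at compile time at each call site.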
   enum AllocationMode {
     kRegular,
     kPooled,
   };
+  enum FreeMode {
+    kFull,
+    kPreFreeAndQueue,
+    kPooledAndQueue,
+  };

   explicit MemoryAllocator(Isolate* isolate);

   // Initializes its internal bookkeeping structures.
   // Max capacity of the total space and executable memory limit.
   bool SetUp(intptr_t max_capacity, intptr_t capacity_executable,
              intptr_t code_range_size);

   void TearDown();

   // Allocates a Page from the allocator. AllocationMode is used to indicate
   // whether pooled allocation, which only works for MemoryChunk::kPageSize,
   // should be tried first.
   template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
             typename SpaceType>
   Page* AllocatePage(intptr_t size, SpaceType* owner, Executability executable);

   LargePage* AllocateLargePage(intptr_t size, LargeObjectSpace* owner,
                                Executability executable);

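Because alloc_mode is a template parameter with a kRegular default, existing call sites compile unchanged while opted-in spaces get the pool-first path resolved at compile time. A toy model of that dispatch follows; AllocateSketch and its int pool are hypothetical and only mimic the shape of AllocatePage:

    #include <cstddef>
    #include <vector>

    enum AllocationMode { kRegular, kPooled };

    std::vector<int*> pool;  // stand-in for the unmapper's pooled queue

    template <AllocationMode mode = kRegular>
    int* AllocateSketch(std::size_t size) {
      if (mode == kPooled && !pool.empty()) {
        int* reused = pool.back();  // pool hit: recycle an old reservation
        pool.pop_back();
        return reused;
      }
      return new int[size];  // pool miss or regular mode: fresh allocation
    }

    // Usage: AllocateSketch(8) takes the regular path;
    // AllocateSketch<kPooled>(8) opts into pool reuse.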
-  // PreFree logically frees the object, i.e., it takes care of the size
-  // bookkeeping and calls the allocation callback.
-  void PreFreeMemory(MemoryChunk* chunk);
-
-  // FreeMemory can be called concurrently when PreFree was executed before.
-  void PerformFreeMemory(MemoryChunk* chunk);
-
-  // Free is a wrapper method. For kRegular AllocationMode it calls PreFree and
-  // PerformFreeMemory together. For kPooled it will dispatch to pooled free.
-  template <MemoryAllocator::AllocationMode mode = kRegular>
+  template <MemoryAllocator::FreeMode mode = kFull>
   void Free(MemoryChunk* chunk);
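
FreeMode replaces both the public PreFreeMemory/PerformFreeMemory pair and the old FreePooled: one templated Free now chooses between synchronous freeing, queueing for the concurrent unmapper, and pooling. The diff shows only the declaration; the following is a plausible sketch of the dispatch under the stated flag semantics, not the actual src/heap/spaces.cc body:

    enum FreeMode { kFull, kPreFreeAndQueue, kPooledAndQueue };

    struct Chunk { bool pooled = false; };  // hypothetical stand-in

    struct AllocatorSketch {
      void PreFree(Chunk*) {}            // size bookkeeping, callbacks
      void PerformFree(Chunk*) {}        // actually unmap, synchronously
      void QueueForUnmapping(Chunk*) {}  // hand off to the Unmapper

      template <FreeMode mode>
      void Free(Chunk* chunk) {
        if (mode == kFull) {
          // Synchronous path: bookkeeping plus immediate unmapping.
          PreFree(chunk);
          PerformFree(chunk);
          return;
        }
        if (mode == kPooledAndQueue) {
          // Keep the reservation: mark the chunk so the unmapper only
          // uncommits it and parks it in the pooled queue.
          chunk->pooled = true;
        }
        // Both queued modes do the bookkeeping now and let the
        // concurrent unmapper finish the job later.
        PreFree(chunk);
        QueueForUnmapping(chunk);
      }
    };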

   // Returns allocated spaces in bytes.
   intptr_t Size() { return size_.Value(); }

   // Returns allocated executable spaces in bytes.
   intptr_t SizeExecutable() { return size_executable_.Value(); }

   // Returns the maximum available bytes of heaps.
   intptr_t Available() {
(...skipping 85 matching lines...)
     DCHECK_NE(LO_SPACE, space);
     return (space == CODE_SPACE) ? CodePageAreaSize()
                                  : Page::kAllocatableMemory;
   }

   MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
                                               Address start, size_t commit_size,
                                               size_t reserved_size);

   CodeRange* code_range() { return code_range_; }
+  Unmapper* unmapper() { return &unmapper_; }

  private:
+  // PreFree logically frees the object, i.e., it takes care of the size
+  // bookkeeping and calls the allocation callback.
+  void PreFreeMemory(MemoryChunk* chunk);
+
+  // FreeMemory can be called concurrently when PreFree was executed before.
+  void PerformFreeMemory(MemoryChunk* chunk);
+
   // See AllocatePage for public interface. Note that currently we only support
   // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
   template <typename SpaceType>
   MemoryChunk* AllocatePagePooled(SpaceType* owner);

-  // Free that chunk into the pool.
-  void FreePooled(MemoryChunk* chunk);
-
   Isolate* isolate_;

   CodeRange* code_range_;

   // Maximum space size in bytes.
   intptr_t capacity_;
   // Maximum subset of capacity_ that can be executable
   intptr_t capacity_executable_;

   // Allocated space size in bytes.
(...skipping 35 matching lines...)
     // values only if they did not change in between.
     void* ptr = nullptr;
     do {
       ptr = lowest_ever_allocated_.Value();
     } while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low));
     do {
       ptr = highest_ever_allocated_.Value();
     } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
   }

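The two loops above are a lock-free min/max update: reload the current bound and retry the compare-and-swap until either the bound no longer needs widening or the swap lands. The same pattern expressed with std::atomic, as a self-contained model (V8 uses its own AtomicValue wrapper rather than std::atomic):

    #include <atomic>
    #include <cstdint>

    std::atomic<std::uintptr_t> lowest_ever{UINTPTR_MAX};
    std::atomic<std::uintptr_t> highest_ever{0};

    // Widen [lowest_ever, highest_ever] to cover [low, high]. Safe from any
    // thread: a failed compare_exchange_weak refreshes `cur`, and the loop
    // re-tests whether widening is still needed at all.
    void UpdateLimits(std::uintptr_t low, std::uintptr_t high) {
      std::uintptr_t cur = lowest_ever.load();
      while (low < cur && !lowest_ever.compare_exchange_weak(cur, low)) {
      }
      cur = highest_ever.load();
      while (high > cur && !highest_ever.compare_exchange_weak(cur, high)) {
      }
    }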
-  List<MemoryChunk*> chunk_pool_;
-
   base::VirtualMemory last_chunk_;
+  Unmapper unmapper_;

   friend class TestCodeRangeScope;

   DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
 };


 // -----------------------------------------------------------------------------
 // Interface for heap object iterator to be implemented by all object space
 // object iterators.
(...skipping 1548 matching lines...)
     count = 0;
   }
   // Must be small, since an iteration is used for lookup.
   static const int kMaxComments = 64;
 };
 #endif
 }  // namespace internal
 }  // namespace v8

 #endif  // V8_HEAP_SPACES_H_
