OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
7 | 7 |
| 8 #include <list> |
| 9 |
8 #include "src/allocation.h" | 10 #include "src/allocation.h" |
9 #include "src/atomic-utils.h" | 11 #include "src/atomic-utils.h" |
10 #include "src/base/atomicops.h" | 12 #include "src/base/atomicops.h" |
11 #include "src/base/bits.h" | 13 #include "src/base/bits.h" |
12 #include "src/base/platform/mutex.h" | 14 #include "src/base/platform/mutex.h" |
13 #include "src/flags.h" | 15 #include "src/flags.h" |
14 #include "src/hashmap.h" | 16 #include "src/hashmap.h" |
15 #include "src/list.h" | 17 #include "src/list.h" |
16 #include "src/objects.h" | 18 #include "src/objects.h" |
17 #include "src/utils.h" | 19 #include "src/utils.h" |
(...skipping 416 matching lines...)
434 // candidates selection cycle. | 436 // candidates selection cycle. |
435 FORCE_EVACUATION_CANDIDATE_FOR_TESTING, | 437 FORCE_EVACUATION_CANDIDATE_FOR_TESTING, |
436 | 438 |
437 // This flag is intended to be used for testing. | 439 // This flag is intended to be used for testing. |
438 NEVER_ALLOCATE_ON_PAGE, | 440 NEVER_ALLOCATE_ON_PAGE, |
439 | 441 |
440 // The memory chunk is already logically freed; however, the actual freeing | 442 // The memory chunk is already logically freed; however, the actual freeing |
441 // still has to be performed. | 443 // still has to be performed. |
442 PRE_FREED, | 444 PRE_FREED, |
443 | 445 |
| 446 // |POOLED|: When actually freeing this chunk, only uncommit and do not |
| 447 // give up the reservation, as we may still reuse the chunk at some point. |
| 448 POOLED, |
| 449 |
444 // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page | 450 // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page |
445 // has been aborted and needs special handling by the sweeper. | 451 // has been aborted and needs special handling by the sweeper. |
446 COMPACTION_WAS_ABORTED, | 452 COMPACTION_WAS_ABORTED, |
447 | 453 |
448 // Last flag, keep at bottom. | 454 // Last flag, keep at bottom. |
449 NUM_MEMORY_CHUNK_FLAGS | 455 NUM_MEMORY_CHUNK_FLAGS |
450 }; | 456 }; |
451 | 457 |
452 // |kSweepingDone|: The page state when sweeping is complete or sweeping must | 458 // |kSweepingDone|: The page state when sweeping is complete or sweeping must |
453 // not be performed on that page. Sweeper threads that are done with their | 459 // not be performed on that page. Sweeper threads that are done with their |
(...skipping 795 matching lines...)
1249 STATIC_ASSERT(Page::kPageSize % kRegionSize == 0); | 1255 STATIC_ASSERT(Page::kPageSize % kRegionSize == 0); |
1250 | 1256 |
1251 Address starts_[kSize]; | 1257 Address starts_[kSize]; |
1252 }; | 1258 }; |
1253 | 1259 |
1254 | 1260 |
1255 // ---------------------------------------------------------------------------- | 1261 // ---------------------------------------------------------------------------- |
1256 // A space acquires chunks of memory from the operating system. The memory | 1262 // A space acquires chunks of memory from the operating system. The memory |
1257 // allocator allocates and deallocates pages for the paged heap spaces and large | 1263 // allocator allocates and deallocates pages for the paged heap spaces and large |
1258 // pages for large object space. | 1264 // pages for large object space. |
1259 // | |
1260 // Each space has to manage it's own pages. | |
1261 // | |
1262 class MemoryAllocator { | 1265 class MemoryAllocator { |
1263 public: | 1266 public: |
| 1267 // Unmapper takes care of concurrently unmapping and uncommitting memory |
| 1268 // chunks. |
| 1269 class Unmapper { |
| 1270 public: |
| 1271 class UnmapFreeMemoryTask; |
| 1272 |
| 1273 explicit Unmapper(MemoryAllocator* allocator) |
| 1274 : allocator_(allocator), |
| 1275 pending_unmapping_tasks_semaphore_(0), |
| 1276 concurrent_unmapping_tasks_active_(0) {} |
| 1277 |
| 1278 void AddMemoryChunkSafe(MemoryChunk* chunk) { |
| 1279 if ((chunk->size() == Page::kPageSize) && |
| 1280 (chunk->executable() != EXECUTABLE)) { |
| 1281 AddMemoryChunkSafe<kRegular>(chunk); |
| 1282 } else { |
| 1283 AddMemoryChunkSafe<kNonRegular>(chunk); |
| 1284 } |
| 1285 } |
| 1286 |
| 1287 MemoryChunk* TryGetPooledMemoryChunkSafe() { |
| 1288 // Procedure: |
| 1289 // (1) Try to get a chunk that was declared as pooled and already has |
| 1290 // been uncommitted. |
| 1291 // (2) Try to steal any memory chunk of kPageSize that would've been |
| 1292 // unmapped. |
| 1293 MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>(); |
| 1294 if (chunk == nullptr) { |
| 1295 chunk = GetMemoryChunkSafe<kRegular>(); |
| 1296 if (chunk != nullptr) { |
| 1297 // For stolen chunks we need to manually free any allocated memory. |
| 1298 chunk->ReleaseAllocatedMemory(); |
| 1299 } |
| 1300 } |
| 1301 return chunk; |
| 1302 } |
| 1303 |
| 1304 void FreeQueuedChunks(); |
| 1305 bool WaitUntilCompleted(); |
| 1306 |
| 1307 private: |
| 1308 enum ChunkQueueType { |
| 1309 kRegular, // Pages of kPageSize that do not live in a CodeRange and |
| 1310 // can thus be used for stealing. |
| 1311 kNonRegular, // Large chunks and executable chunks. |
| 1312 kPooled, // Pooled chunks, already uncommitted and ready for reuse. |
| 1313 kNumberOfChunkQueues, |
| 1314 }; |
| 1315 |
| 1316 template <ChunkQueueType type> |
| 1317 void AddMemoryChunkSafe(MemoryChunk* chunk) { |
| 1318 base::LockGuard<base::Mutex> guard(&mutex_); |
| 1319 chunks_[type].push_back(chunk); |
| 1320 } |
| 1321 |
| 1322 template <ChunkQueueType type> |
| 1323 MemoryChunk* GetMemoryChunkSafe() { |
| 1324 base::LockGuard<base::Mutex> guard(&mutex_); |
| 1325 if (chunks_[type].empty()) return nullptr; |
| 1326 MemoryChunk* chunk = chunks_[type].front(); |
| 1327 chunks_[type].pop_front(); |
| 1328 return chunk; |
| 1329 } |
| 1330 |
| 1331 void PerformFreeMemoryOnQueuedChunks(); |
| 1332 |
| 1333 base::Mutex mutex_; |
| 1334 MemoryAllocator* allocator_; |
| 1335 std::list<MemoryChunk*> chunks_[kNumberOfChunkQueues]; |
| 1336 base::Semaphore pending_unmapping_tasks_semaphore_; |
| 1337 intptr_t concurrent_unmapping_tasks_active_; |
| 1338 |
| 1339 friend class MemoryAllocator; |
| 1340 }; |
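
For orientation, a minimal sketch of how a collector might drive the Unmapper above. The function name and the dead_chunks container are hypothetical; only unmapper(), AddMemoryChunkSafe(), FreeQueuedChunks(), and WaitUntilCompleted() come from this header.

#include <vector>

// Hedged sketch, not the actual GC call sites: queue dead pages for
// concurrent unmapping, then kick off the background work.
void ReleaseDeadPages(MemoryAllocator* allocator,
                      const std::vector<MemoryChunk*>& dead_chunks) {
  for (MemoryChunk* chunk : dead_chunks) {
    // Thread-safe enqueue: non-executable chunks of exactly kPageSize go
    // to the stealable kRegular queue, everything else to kNonRegular.
    allocator->unmapper()->AddMemoryChunkSafe(chunk);
  }
  allocator->unmapper()->FreeQueuedChunks();
}

// Before the next cycle touches these pages, a caller would synchronize:
//   allocator->unmapper()->WaitUntilCompleted();
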
| 1341 |
1264 enum AllocationMode { | 1342 enum AllocationMode { |
1265 kRegular, | 1343 kRegular, |
1266 kPooled, | 1344 kPooled, |
1267 }; | 1345 }; |
| 1346 enum FreeMode { |
| 1347 kFull, |
| 1348 kPreFreeAndQueue, |
| 1349 kPooledAndQueue, |
| 1350 }; |
1268 | 1351 |
1269 explicit MemoryAllocator(Isolate* isolate); | 1352 explicit MemoryAllocator(Isolate* isolate); |
1270 | 1353 |
1271 // Initializes its internal bookkeeping structures. | 1354 // Initializes its internal bookkeeping structures. |
1272 // Max capacity of the total space and executable memory limit. | 1355 // Max capacity of the total space and executable memory limit. |
1273 bool SetUp(intptr_t max_capacity, intptr_t capacity_executable, | 1356 bool SetUp(intptr_t max_capacity, intptr_t capacity_executable, |
1274 intptr_t code_range_size); | 1357 intptr_t code_range_size); |
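
A hypothetical setup/teardown sequence; the capacity constants below are illustrative stand-ins for values that normally come from the Heap configuration.

// Hedged sketch: illustrative capacities, not V8's real configuration.
const intptr_t kMaxCapacity = 512 * MB;    // cap on total allocated memory
const intptr_t kMaxExecutable = 128 * MB;  // cap on the executable subset
const intptr_t kCodeRangeSize = 0;         // assumed: no dedicated code range

MemoryAllocator allocator(isolate);
if (!allocator.SetUp(kMaxCapacity, kMaxExecutable, kCodeRangeSize)) {
  // SetUp failing means the internal bookkeeping could not be established.
}
// ... allocator serves page requests for the heap's lifetime ...
allocator.TearDown();
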
1275 | 1358 |
1276 void TearDown(); | 1359 void TearDown(); |
1277 | 1360 |
1278 // Allocates either Page or NewSpacePage from the allocator. AllocationMode | 1361 // Allocates either Page or NewSpacePage from the allocator. AllocationMode |
1279 // is used to indicate whether pooled allocation, which only works for | 1362 // is used to indicate whether pooled allocation, which only works for |
1280 // MemoryChunk::kPageSize, should be tried first. | 1363 // MemoryChunk::kPageSize, should be tried first. |
1281 template <typename PageType, MemoryAllocator::AllocationMode mode = kRegular, | 1364 template <typename PageType, MemoryAllocator::AllocationMode mode = kRegular, |
1282 typename SpaceType> | 1365 typename SpaceType> |
1283 PageType* AllocatePage(intptr_t size, SpaceType* owner, | 1366 PageType* AllocatePage(intptr_t size, SpaceType* owner, |
1284 Executability executable); | 1367 Executability executable); |
1285 | 1368 |
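
A hypothetical call site for pooled allocation; the owner space and the heap accessor are stand-ins, while the template parameters match the declaration above.

// Hedged sketch: request a new-space page, preferring a pooled chunk.
// kPooled applies only to NOT_EXECUTABLE pages of MemoryChunk::kPageSize;
// with an empty pool, allocation falls back to a fresh mapping.
NewSpacePage* page =
    heap->memory_allocator()
        ->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>(
            NewSpacePage::kAllocatableMemory, semi_space, NOT_EXECUTABLE);
if (page == nullptr) {
  // Propagate the allocation failure to the caller.
}
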
1286 // PreFree logically frees the object, i.e., it takes care of the size | 1369 template <MemoryAllocator::FreeMode mode = kFull> |
1287 // bookkeeping and calls the allocation callback. | |
1288 void PreFreeMemory(MemoryChunk* chunk); | |
1289 | |
1290 // FreeMemory can be called concurrently when PreFree was executed before. | |
1291 void PerformFreeMemory(MemoryChunk* chunk); | |
1292 | |
1293 // Free is a wrapper method. For kRegular AllocationMode it calls PreFree and | |
1294 // PerformFreeMemory together. For kPooled it will dispatch to pooled free. | |
1295 template <MemoryAllocator::AllocationMode mode = kRegular> | |
1296 void Free(MemoryChunk* chunk); | 1370 void Free(MemoryChunk* chunk); |
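
The FreeMode semantics (per the enum above and the comments this change removes) suggest a dispatch along the following lines. This is a sketch of the implied behavior, not necessarily the .cc implementation; SetFlag and the DCHECKs are assumptions.

template <MemoryAllocator::FreeMode mode>
void MemoryAllocator::Free(MemoryChunk* chunk) {
  switch (mode) {
    case kFull:  // Synchronous: size bookkeeping plus immediate unmap.
      PreFreeMemory(chunk);
      PerformFreeMemory(chunk);
      break;
    case kPooledAndQueue:  // Keep the reservation; only uncommit later.
      DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
      DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
      chunk->SetFlag(MemoryChunk::POOLED);
      // Fall through: pooled chunks are also pre-freed and queued.
    case kPreFreeAndQueue:  // Defer the actual unmap to the Unmapper.
      PreFreeMemory(chunk);
      unmapper()->AddMemoryChunkSafe(chunk);
      break;
  }
}
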
1297 | 1371 |
1298 // Returns allocated space in bytes. | 1372 // Returns allocated space in bytes. |
1299 intptr_t Size() { return size_.Value(); } | 1373 intptr_t Size() { return size_.Value(); } |
1300 | 1374 |
1301 // Returns allocated executable space in bytes. | 1375 // Returns allocated executable space in bytes. |
1302 intptr_t SizeExecutable() { return size_executable_.Value(); } | 1376 intptr_t SizeExecutable() { return size_executable_.Value(); } |
1303 | 1377 |
1304 // Returns the maximum available bytes of the heap. | 1378 // Returns the maximum available bytes of the heap. |
1305 intptr_t Available() { | 1379 intptr_t Available() { |
(...skipping 85 matching lines...)
1391 DCHECK_NE(LO_SPACE, space); | 1465 DCHECK_NE(LO_SPACE, space); |
1392 return (space == CODE_SPACE) ? CodePageAreaSize() | 1466 return (space == CODE_SPACE) ? CodePageAreaSize() |
1393 : Page::kAllocatableMemory; | 1467 : Page::kAllocatableMemory; |
1394 } | 1468 } |
1395 | 1469 |
1396 MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm, | 1470 MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm, |
1397 Address start, size_t commit_size, | 1471 Address start, size_t commit_size, |
1398 size_t reserved_size); | 1472 size_t reserved_size); |
1399 | 1473 |
1400 CodeRange* code_range() { return code_range_; } | 1474 CodeRange* code_range() { return code_range_; } |
| 1475 Unmapper* unmapper() { return &unmapper_; } |
1401 | 1476 |
1402 private: | 1477 private: |
| 1478 // PreFree logically frees the object, i.e., it takes care of the size |
| 1479 // bookkeeping and calls the allocation callback. |
| 1480 void PreFreeMemory(MemoryChunk* chunk); |
| 1481 |
| 1482 // FreeMemory can be called concurrently when PreFree was executed before. |
| 1483 void PerformFreeMemory(MemoryChunk* chunk); |
| 1484 |
1403 // See AllocatePage for public interface. Note that currently we only support | 1485 // See AllocatePage for public interface. Note that currently we only support |
1404 // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize. | 1486 // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize. |
1405 template <typename SpaceType> | 1487 template <typename SpaceType> |
1406 MemoryChunk* AllocatePagePooled(SpaceType* owner); | 1488 MemoryChunk* AllocatePagePooled(SpaceType* owner); |
1407 | 1489 |
1408 // Free that chunk into the pool. | |
1409 void FreePooled(MemoryChunk* chunk); | |
1410 | |
1411 Isolate* isolate_; | 1490 Isolate* isolate_; |
1412 | 1491 |
1413 CodeRange* code_range_; | 1492 CodeRange* code_range_; |
1414 | 1493 |
1415 // Maximum space size in bytes. | 1494 // Maximum space size in bytes. |
1416 intptr_t capacity_; | 1495 intptr_t capacity_; |
1417 // Maximum subset of capacity_ that can be executable | 1496 // Maximum subset of capacity_ that can be executable |
1418 intptr_t capacity_executable_; | 1497 intptr_t capacity_executable_; |
1419 | 1498 |
1420 // Allocated space size in bytes. | 1499 // Allocated space size in bytes. |
(...skipping 35 matching lines...)
1456 // values only if they did not change in between. | 1535 // values only if they did not change in between. |
1457 void* ptr = nullptr; | 1536 void* ptr = nullptr; |
1458 do { | 1537 do { |
1459 ptr = lowest_ever_allocated_.Value(); | 1538 ptr = lowest_ever_allocated_.Value(); |
1460 } while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low)); | 1539 } while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low)); |
1461 do { | 1540 do { |
1462 ptr = highest_ever_allocated_.Value(); | 1541 ptr = highest_ever_allocated_.Value(); |
1463 } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high)); | 1542 } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high)); |
1464 } | 1543 } |
1465 | 1544 |
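
The update loop above is a lock-free monotonic min/max. The same pattern in isolation, as a standalone sketch using std::atomic in place of V8's AtomicValue:

#include <atomic>

// Keep a monotonically decreasing minimum without a lock. On failure,
// compare_exchange_weak refreshes `observed` with the currently stored
// value, so the loop retries until the swap succeeds or another thread
// has already stored something <= candidate.
inline void UpdateMinimum(std::atomic<const void*>* lowest,
                          const void* candidate) {
  const void* observed = lowest->load(std::memory_order_relaxed);
  while (candidate < observed &&
         !lowest->compare_exchange_weak(observed, candidate)) {
    // `observed` now holds the latest value; the condition re-tests it.
  }
}
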
1466 List<MemoryChunk*> chunk_pool_; | |
1467 | |
1468 base::VirtualMemory last_chunk_; | 1545 base::VirtualMemory last_chunk_; |
| 1546 Unmapper unmapper_; |
1469 | 1547 |
1470 friend class TestCodeRangeScope; | 1548 friend class TestCodeRangeScope; |
1471 | 1549 |
1472 DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator); | 1550 DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator); |
1473 }; | 1551 }; |
1474 | 1552 |
1475 | 1553 |
1476 // ----------------------------------------------------------------------------- | 1554 // ----------------------------------------------------------------------------- |
1477 // Interface for heap object iterator to be implemented by all object space | 1555 // Interface for heap object iterator to be implemented by all object space |
1478 // object iterators. | 1556 // object iterators. |
(...skipping 1631 matching lines...)
3110 count = 0; | 3188 count = 0; |
3111 } | 3189 } |
3112 // Must be small, since an iteration is used for lookup. | 3190 // Must be small, since an iteration is used for lookup. |
3113 static const int kMaxComments = 64; | 3191 static const int kMaxComments = 64; |
3114 }; | 3192 }; |
3115 #endif | 3193 #endif |
3116 } // namespace internal | 3194 } // namespace internal |
3117 } // namespace v8 | 3195 } // namespace v8 |
3118 | 3196 |
3119 #endif // V8_HEAP_SPACES_H_ | 3197 #endif // V8_HEAP_SPACES_H_ |