Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
| 6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
| 7 | 7 |
| 8 #include <list> | |
| 9 | |
| 8 #include "src/allocation.h" | 10 #include "src/allocation.h" |
| 9 #include "src/atomic-utils.h" | 11 #include "src/atomic-utils.h" |
| 10 #include "src/base/atomicops.h" | 12 #include "src/base/atomicops.h" |
| 11 #include "src/base/bits.h" | 13 #include "src/base/bits.h" |
| 12 #include "src/base/platform/mutex.h" | 14 #include "src/base/platform/mutex.h" |
| 13 #include "src/flags.h" | 15 #include "src/flags.h" |
| 14 #include "src/hashmap.h" | 16 #include "src/hashmap.h" |
| 15 #include "src/list.h" | 17 #include "src/list.h" |
| 16 #include "src/objects.h" | 18 #include "src/objects.h" |
| 17 #include "src/utils.h" | 19 #include "src/utils.h" |
| (...skipping 416 matching lines...) | |
| 434 // candidates selection cycle. | 436 // candidates selection cycle. |
| 435 FORCE_EVACUATION_CANDIDATE_FOR_TESTING, | 437 FORCE_EVACUATION_CANDIDATE_FOR_TESTING, |
| 436 | 438 |
| 437 // This flag is intended to be used for testing. | 439 // This flag is intended to be used for testing. |
| 438 NEVER_ALLOCATE_ON_PAGE, | 440 NEVER_ALLOCATE_ON_PAGE, |
| 439 | 441 |
| 440 // The memory chunk is already logically freed, however the actual freeing | 442 // The memory chunk is already logically freed, however the actual freeing |
| 441 // still has to be performed. | 443 // still has to be performed. |
| 442 PRE_FREED, | 444 PRE_FREED, |
| 443 | 445 |
| 446 // |POOLED|: When actually freeing this chunk, only uncommit and do not | |
| 447 // give up the reservation as we still reuse the chunk at some point. | |
| 448 POOLED, | |
| 449 | |
| 444 // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page | 450 // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page |
| 445 // has been aborted and needs special handling by the sweeper. | 451 // has been aborted and needs special handling by the sweeper. |
| 446 COMPACTION_WAS_ABORTED, | 452 COMPACTION_WAS_ABORTED, |
| 447 | 453 |
| 448 // Last flag, keep at bottom. | 454 // Last flag, keep at bottom. |
| 449 NUM_MEMORY_CHUNK_FLAGS | 455 NUM_MEMORY_CHUNK_FLAGS |
| 450 }; | 456 }; |
| 451 | 457 |
| 452 // |kSweepingDone|: The page state when sweeping is complete or sweeping must | 458 // |kSweepingDone|: The page state when sweeping is complete or sweeping must |
| 453 // not be performed on that page. Sweeper threads that are done with their | 459 // not be performed on that page. Sweeper threads that are done with their |
| (...skipping 795 matching lines...) | |
| 1249 STATIC_ASSERT(Page::kPageSize % kRegionSize == 0); | 1255 STATIC_ASSERT(Page::kPageSize % kRegionSize == 0); |
| 1250 | 1256 |
| 1251 Address starts_[kSize]; | 1257 Address starts_[kSize]; |
| 1252 }; | 1258 }; |
| 1253 | 1259 |
| 1254 | 1260 |
| 1255 // ---------------------------------------------------------------------------- | 1261 // ---------------------------------------------------------------------------- |
| 1256 // A space acquires chunks of memory from the operating system. The memory | 1262 // A space acquires chunks of memory from the operating system. The memory |
| 1257 // allocator allocates and deallocates pages for the paged heap spaces and large | 1263 // allocator allocates and deallocates pages for the paged heap spaces and large |
| 1258 // pages for large object space. | 1264 // pages for large object space. |
| 1259 // | |
| 1260 // Each space has to manage its own pages. | |
| 1261 // | |
| 1262 class MemoryAllocator { | 1265 class MemoryAllocator { |
| 1263 public: | 1266 public: |
| 1267 // Unmapper takes care of concurrently unmapping and uncommitting memory | |
| 1268 // chunks. | |
| 1269 class Unmapper { | |
| 1270 public: | |
| 1271 class UnmapFreeMemoryTask; | |
| 1272 | |
| 1273 explicit Unmapper(MemoryAllocator* allocator) | |
| 1274 : allocator_(allocator), | |
| 1275 pending_unmapping_tasks_semaphore_(0), | |
| 1276 concurrent_unmapping_tasks_active_(0) {} | |
| 1277 | |
| 1278 void AddMemoryChunkSafe(MemoryChunk* chunk) { | |
| 1279 if ((chunk->size() == Page::kPageSize) && | |
| 1280 (chunk->executable() != EXECUTABLE)) { | |
| 1281 AddMemoryChunkSafe<kRegularProcess>(chunk); | |
| 1282 } else { | |
| 1283 AddMemoryChunkSafe<kNonRegularProcess>(chunk); | |
| 1284 } | |
| 1285 } | |
| 1286 | |
| 1287 MemoryChunk* TryGetPooledMemoryChunkSafe() { | |
| 1288 // Procedure: | |
| 1289 // (1) Try to get a chunk that was declared as pooled and already has | |
| 1290 // been uncommitted. | |
| 1291 // (2) Try to steal any memory chunk of kPageSize that would've been | |
| 1292 // unmapped. | |
| 1293 MemoryChunk* chunk = GetMemoryChunkSafe<kPooledOutput>(); | |
| 1294 if (chunk == nullptr) { | |
| 1295 chunk = GetMemoryChunkSafe<kRegularProcess>(); | |
| 1296 if (chunk != nullptr) { | |
| 1297 // For stolen chunks we need to manually free any allocated memory. | |
| 1298 chunk->ReleaseAllocatedMemory(); | |
| 1299 } | |
| 1300 } | |
| 1301 return chunk; | |
| 1302 } | |
| 1303 | |
| 1304 void FreeQueuedChunks(); | |
| 1305 bool WaitUntilCompleted(); | |
| 1306 | |
| 1307 private: | |
| 1308 enum ChunkQueueType { | |
| 1309 kRegularProcess, // Pages of kPageSize that do not live in a CodeRange | |

> Hannes Payer (out of office), 2016/04/26 13:01:27: kRegular
> Michael Lippautz, 2016/04/26 13:57:51: Done.

| 1310 // and can thus be used for stealing. | |
| 1311 kNonRegularProcess, // Large chunks and executable chunks. | |

> Hannes Payer (out of office), 2016/04/26 13:01:27: kNonRegular
> Michael Lippautz, 2016/04/26 13:57:51: Done.

| 1312 kPooledOutput, // Pooled chunks, already uncommitted and ready for | |

> Hannes Payer (out of office), 2016/04/26 13:01:27: kPooled
> Michael Lippautz, 2016/04/26 13:57:51: Done.

| 1313 // reuse. | |
| 1314 kNumberOfChunkQueues, | |
| 1315 }; | |
| 1316 | |
| 1317 template <ChunkQueueType type> | |
| 1318 void AddMemoryChunkSafe(MemoryChunk* chunk) { | |
| 1319 base::LockGuard<base::Mutex> guard(&mutex_); | |
| 1320 chunks_[type].push_back(chunk); | |
| 1321 } | |
| 1322 | |
| 1323 template <ChunkQueueType type> | |
| 1324 MemoryChunk* GetMemoryChunkSafe() { | |
| 1325 base::LockGuard<base::Mutex> guard(&mutex_); | |
| 1326 if (chunks_[type].empty()) return nullptr; | |
| 1327 MemoryChunk* chunk = chunks_[type].front(); | |
| 1328 chunks_[type].pop_front(); | |
| 1329 return chunk; | |
| 1330 } | |
| 1331 | |
| 1332 void PerformFreeMemoryOnQueuedChunks(); | |
| 1333 | |
| 1334 base::Mutex mutex_; | |
| 1335 MemoryAllocator* allocator_; | |
| 1336 std::list<MemoryChunk*> chunks_[kNumberOfChunkQueues]; | |
| 1337 base::Semaphore pending_unmapping_tasks_semaphore_; | |
| 1338 intptr_t concurrent_unmapping_tasks_active_; | |
| 1339 | |
| 1340 friend class MemoryAllocator; | |
| 1341 }; | |
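
The Unmapper above is the new concurrency seam: freeing queues chunks, a background task unmaps or uncommits them, and allocation can pull pooled chunks back out. A minimal sketch of how a caller would drive it, assuming a `memory_allocator()` accessor on `Heap` (hypothetical call site, not part of this CL; only `unmapper()`, `FreeQueuedChunks()`, and `WaitUntilCompleted()` come from this patch):

```cpp
// Hypothetical driver, sketching the intended use of the Unmapper API.
// Heap::memory_allocator() is an assumed accessor.
void SketchGcEpilogue(Heap* heap) {
  MemoryAllocator::Unmapper* unmapper = heap->memory_allocator()->unmapper();
  // Spawn a concurrent task that unmaps/uncommits everything queued so far.
  unmapper->FreeQueuedChunks();
  // ... mutator work can proceed here ...
  // Block until the concurrent unmapping tasks have drained the queues.
  unmapper->WaitUntilCompleted();
}
```
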
| 1342 | |
| 1264 enum AllocationMode { | 1343 enum AllocationMode { |
| 1265 kRegular, | 1344 kRegular, |
| 1266 kPooled, | 1345 kPooled, |
| 1267 }; | 1346 }; |
| 1347 enum FreeMode { | |
| 1348 kFull, | |
| 1349 kPreFreeAndQueue, | |
| 1350 kPooledAndQueue, | |
| 1351 }; | |
| 1268 | 1352 |
| 1269 explicit MemoryAllocator(Isolate* isolate); | 1353 explicit MemoryAllocator(Isolate* isolate); |
| 1270 | 1354 |
| 1271 // Initializes its internal bookkeeping structures. | 1355 // Initializes its internal bookkeeping structures. |
| 1272 // Max capacity of the total space and executable memory limit. | 1356 // Max capacity of the total space and executable memory limit. |
| 1273 bool SetUp(intptr_t max_capacity, intptr_t capacity_executable, | 1357 bool SetUp(intptr_t max_capacity, intptr_t capacity_executable, |
| 1274 intptr_t code_range_size); | 1358 intptr_t code_range_size); |
| 1275 | 1359 |
| 1276 void TearDown(); | 1360 void TearDown(); |
| 1277 | 1361 |
| 1278 // Allocates either Page or NewSpacePage from the allocator. AllocationMode | 1362 // Allocates either Page or NewSpacePage from the allocator. AllocationMode |
| 1279 // is used to indicate whether pooled allocation, which only works for | 1363 // is used to indicate whether pooled allocation, which only works for |
| 1280 // MemoryChunk::kPageSize, should be tried first. | 1364 // MemoryChunk::kPageSize, should be tried first. |
| 1281 template <typename PageType, MemoryAllocator::AllocationMode mode = kRegular, | 1365 template <typename PageType, MemoryAllocator::AllocationMode mode = kRegular, |
| 1282 typename SpaceType> | 1366 typename SpaceType> |
| 1283 PageType* AllocatePage(intptr_t size, SpaceType* owner, | 1367 PageType* AllocatePage(intptr_t size, SpaceType* owner, |
| 1284 Executability executable); | 1368 Executability executable); |
| 1285 | 1369 |
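
As a usage sketch for the template above (hypothetical call site; `memory_allocator` and `semi_space` are placeholders), a new-space page would be requested with pooled allocation tried first:

```cpp
// Hypothetical call site: request a new-space page, trying the pool first.
// Page::kAllocatableMemory is the size constant this header uses for
// regular pages; the allocator falls back to a fresh mapping if the pool
// is empty.
NewSpacePage* page =
    memory_allocator->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>(
        Page::kAllocatableMemory, semi_space, NOT_EXECUTABLE);
```
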
| 1286 // PreFree logically frees the object, i.e., it takes care of the size | 1370 template <MemoryAllocator::FreeMode mode = kFull> |
| 1287 // bookkeeping and calls the allocation callback. | |
| 1288 void PreFreeMemory(MemoryChunk* chunk); | |
| 1289 | |
| 1290 // FreeMemory can be called concurrently when PreFree was executed before. | |
| 1291 void PerformFreeMemory(MemoryChunk* chunk); | |
| 1292 | |
| 1293 // Free is a wrapper method. For kRegular AllocationMode it calls PreFree and | |
| 1294 // PerformFreeMemory together. For kPooled it will dispatch to pooled free. | |
| 1295 template <MemoryAllocator::AllocationMode mode = kRegular> | |
| 1296 void Free(MemoryChunk* chunk); | 1371 void Free(MemoryChunk* chunk); |
| 1297 | 1372 |
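
The OLD comment documenting Free was dropped in this patchset; the three FreeMode values suggest the following dispatch. This is a sketch of a plausible definition (the real body lives in spaces.cc and is not shown in this review), using only names declared in this header:

```cpp
// Plausible dispatch for Free<mode> (assumed; not part of this header).
template <MemoryAllocator::FreeMode mode>
void MemoryAllocator::Free(MemoryChunk* chunk) {
  switch (mode) {
    case kFull:
      // Synchronous path: logical free plus actual unmapping, no queueing.
      PreFreeMemory(chunk);
      PerformFreeMemory(chunk);
      break;
    case kPooledAndQueue:
      // Mark the chunk so the Unmapper only uncommits it and keeps the
      // reservation for reuse (see the POOLED flag above), then queue it.
      chunk->SetFlag(MemoryChunk::POOLED);
      // Fall through.
    case kPreFreeAndQueue:
      PreFreeMemory(chunk);
      unmapper()->AddMemoryChunkSafe(chunk);  // Freed concurrently later.
      break;
  }
}
```
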
| 1298 // Returns allocated spaces in bytes. | 1373 // Returns allocated spaces in bytes. |
| 1299 intptr_t Size() { return size_.Value(); } | 1374 intptr_t Size() { return size_.Value(); } |
| 1300 | 1375 |
| 1301 // Returns allocated executable spaces in bytes. | 1376 // Returns allocated executable spaces in bytes. |
| 1302 intptr_t SizeExecutable() { return size_executable_.Value(); } | 1377 intptr_t SizeExecutable() { return size_executable_.Value(); } |
| 1303 | 1378 |
| 1304 // Returns the maximum available bytes of heaps. | 1379 // Returns the maximum available bytes of heaps. |
| 1305 intptr_t Available() { | 1380 intptr_t Available() { |
| (...skipping 85 matching lines...) | |
| 1391 DCHECK_NE(LO_SPACE, space); | 1466 DCHECK_NE(LO_SPACE, space); |
| 1392 return (space == CODE_SPACE) ? CodePageAreaSize() | 1467 return (space == CODE_SPACE) ? CodePageAreaSize() |
| 1393 : Page::kAllocatableMemory; | 1468 : Page::kAllocatableMemory; |
| 1394 } | 1469 } |
| 1395 | 1470 |
| 1396 MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm, | 1471 MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm, |
| 1397 Address start, size_t commit_size, | 1472 Address start, size_t commit_size, |
| 1398 size_t reserved_size); | 1473 size_t reserved_size); |
| 1399 | 1474 |
| 1400 CodeRange* code_range() { return code_range_; } | 1475 CodeRange* code_range() { return code_range_; } |
| 1476 Unmapper* unmapper() { return &unmapper_; } | |
| 1401 | 1477 |
| 1402 private: | 1478 private: |
| 1479 // PreFree logically frees the object, i.e., it takes care of the size | |
| 1480 // bookkeeping and calls the allocation callback. | |
| 1481 void PreFreeMemory(MemoryChunk* chunk); | |
| 1482 | |
| 1483 // FreeMemory can be called concurrently when PreFree was executed before. | |
| 1484 void PerformFreeMemory(MemoryChunk* chunk); | |
| 1485 | |
| 1403 // See AllocatePage for public interface. Note that currently we only support | 1486 // See AllocatePage for public interface. Note that currently we only support |
| 1404 // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize. | 1487 // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize. |
| 1405 template <typename SpaceType> | 1488 template <typename SpaceType> |
| 1406 MemoryChunk* AllocatePagePooled(SpaceType* owner); | 1489 MemoryChunk* AllocatePagePooled(SpaceType* owner); |
| 1407 | 1490 |
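
A sketch of how AllocatePagePooled plausibly works (assumed; the definition is in spaces.cc): it reuses an uncommitted chunk from the Unmapper's pool and recommits it, returning nullptr so the caller can fall back to a fresh mapping. `CommitBlock` is assumed to be the existing MemoryAllocator commit helper.

```cpp
// Sketch only: the pool hands back uncommitted chunks, so the memory must
// be recommitted before reuse; chunk-header re-initialization is elided.
template <typename SpaceType>
MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
  MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
  if (chunk == nullptr) return nullptr;  // Pool empty: caller maps fresh.
  const int size = MemoryChunk::kPageSize;
  if (!CommitBlock(reinterpret_cast<Address>(chunk), size, NOT_EXECUTABLE)) {
    return nullptr;
  }
  // Reinitialize the chunk header for its new owner (elided in this sketch).
  return chunk;
}
```
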
| 1408 // Free that chunk into the pool. | |
| 1409 void FreePooled(MemoryChunk* chunk); | |
| 1410 | |
| 1411 Isolate* isolate_; | 1491 Isolate* isolate_; |
| 1412 | 1492 |
| 1413 CodeRange* code_range_; | 1493 CodeRange* code_range_; |
| 1414 | 1494 |
| 1415 // Maximum space size in bytes. | 1495 // Maximum space size in bytes. |
| 1416 intptr_t capacity_; | 1496 intptr_t capacity_; |
| 1417 // Maximum subset of capacity_ that can be executable | 1497 // Maximum subset of capacity_ that can be executable |
| 1418 intptr_t capacity_executable_; | 1498 intptr_t capacity_executable_; |
| 1419 | 1499 |
| 1420 // Allocated space size in bytes. | 1500 // Allocated space size in bytes. |
| (...skipping 35 matching lines...) | |
| 1456 // values only if they did not change in between. | 1536 // values only if they did not change in between. |
| 1457 void* ptr = nullptr; | 1537 void* ptr = nullptr; |
| 1458 do { | 1538 do { |
| 1459 ptr = lowest_ever_allocated_.Value(); | 1539 ptr = lowest_ever_allocated_.Value(); |
| 1460 } while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low)); | 1540 } while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low)); |
| 1461 do { | 1541 do { |
| 1462 ptr = highest_ever_allocated_.Value(); | 1542 ptr = highest_ever_allocated_.Value(); |
| 1463 } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high)); | 1543 } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high)); |
| 1464 } | 1544 } |
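
The loops above implement a lock-free monotonic bound update: retry the compare-and-swap until either it succeeds or another thread has already moved the bound past the candidate. The same pattern in standalone form with std::atomic (illustration only; V8 uses its own AtomicValue wrapper here):

```cpp
#include <atomic>
#include <cstdint>

// Standalone illustration of the lock-free bound update used above.
// compare_exchange_weak reloads `current` on failure, so each retry
// re-checks whether another thread already raised the bound past us.
void UpdateHighest(std::atomic<std::uintptr_t>* highest,
                   std::uintptr_t candidate) {
  std::uintptr_t current = highest->load(std::memory_order_relaxed);
  while (candidate > current &&
         !highest->compare_exchange_weak(current, candidate)) {
    // Retry: `current` now holds the freshly observed value.
  }
}
```
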
| 1465 | 1545 |
| 1466 List<MemoryChunk*> chunk_pool_; | |
| 1467 | |
| 1468 base::VirtualMemory last_chunk_; | 1546 base::VirtualMemory last_chunk_; |
| 1547 Unmapper unmapper_; | |
| 1469 | 1548 |
| 1470 friend class TestCodeRangeScope; | 1549 friend class TestCodeRangeScope; |
| 1471 | 1550 |
| 1472 DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator); | 1551 DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator); |
| 1473 }; | 1552 }; |
| 1474 | 1553 |
| 1475 | 1554 |
| 1476 // ----------------------------------------------------------------------------- | 1555 // ----------------------------------------------------------------------------- |
| 1477 // Interface for heap object iterator to be implemented by all object space | 1556 // Interface for heap object iterator to be implemented by all object space |
| 1478 // object iterators. | 1557 // object iterators. |
| (...skipping 1631 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 3110 count = 0; | 3189 count = 0; |
| 3111 } | 3190 } |
| 3112 // Must be small, since an iteration is used for lookup. | 3191 // Must be small, since an iteration is used for lookup. |
| 3113 static const int kMaxComments = 64; | 3192 static const int kMaxComments = 64; |
| 3114 }; | 3193 }; |
| 3115 #endif | 3194 #endif |
| 3116 } // namespace internal | 3195 } // namespace internal |
| 3117 } // namespace v8 | 3196 } // namespace v8 |
| 3118 | 3197 |
| 3119 #endif // V8_HEAP_SPACES_H_ | 3198 #endif // V8_HEAP_SPACES_H_ |