Chromium Code Reviews

Unified Diff: src/heap/spaces.h

Issue 1925563003: Revert of [heap] Uncommit pooled pages concurrently (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 4 years, 7 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #ifndef V8_HEAP_SPACES_H_
 #define V8_HEAP_SPACES_H_
 
-#include <list>
-
 #include "src/allocation.h"
 #include "src/atomic-utils.h"
 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
 #include "src/base/platform/mutex.h"
 #include "src/flags.h"
 #include "src/hashmap.h"
 #include "src/list.h"
 #include "src/objects.h"
 #include "src/utils.h"
(...skipping 416 matching lines...)
     // candidates selection cycle.
     FORCE_EVACUATION_CANDIDATE_FOR_TESTING,
 
     // This flag is intended to be used for testing.
     NEVER_ALLOCATE_ON_PAGE,
 
     // The memory chunk is already logically freed, however the actual freeing
     // still has to be performed.
     PRE_FREED,
 
-    // |POOLED|: When actually freeing this chunk, only uncommit and do not
-    // give up the reservation as we still reuse the chunk at some point.
-    POOLED,
-
     // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
     // has been aborted and needs special handling by the sweeper.
     COMPACTION_WAS_ABORTED,
 
     // Last flag, keep at bottom.
     NUM_MEMORY_CHUNK_FLAGS
   };
 
   // |kSweepingDone|: The page state when sweeping is complete or sweeping must
   // not be performed on that page. Sweeper threads that are done with their
(...skipping 795 matching lines...)
   STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
 
   Address starts_[kSize];
 };
 
 
 // ----------------------------------------------------------------------------
 // A space acquires chunks of memory from the operating system. The memory
 // allocator allocates and deallocates pages for the paged heap spaces and
 // large pages for large object space.
+//
+// Each space has to manage its own pages.
+//
 class MemoryAllocator {
  public:
-  // Unmapper takes care of concurrently unmapping and uncommitting memory
-  // chunks.
-  class Unmapper {
-   public:
-    class UnmapFreeMemoryTask;
-
-    explicit Unmapper(MemoryAllocator* allocator)
-        : allocator_(allocator),
-          pending_unmapping_tasks_semaphore_(0),
-          concurrent_unmapping_tasks_active_(0) {}
-
-    void AddMemoryChunkSafe(MemoryChunk* chunk) {
-      if ((chunk->size() == Page::kPageSize) &&
-          (chunk->executable() != EXECUTABLE)) {
-        AddMemoryChunkSafe<kRegular>(chunk);
-      } else {
-        AddMemoryChunkSafe<kNonRegular>(chunk);
-      }
-    }
-
-    MemoryChunk* TryGetPooledMemoryChunkSafe() {
-      // Procedure:
-      // (1) Try to get a chunk that was declared as pooled and already has
-      //     been uncommitted.
-      // (2) Try to steal any memory chunk of kPageSize that would've been
-      //     unmapped.
-      MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>();
-      if (chunk == nullptr) {
-        chunk = GetMemoryChunkSafe<kRegular>();
-        if (chunk != nullptr) {
-          // For stolen chunks we need to manually free any allocated memory.
-          chunk->ReleaseAllocatedMemory();
-        }
-      }
-      return chunk;
-    }
-
-    void FreeQueuedChunks();
-    bool WaitUntilCompleted();
-
-   private:
-    enum ChunkQueueType {
-      kRegular,     // Pages of kPageSize that do not live in a CodeRange and
-                    // can thus be used for stealing.
-      kNonRegular,  // Large chunks and executable chunks.
-      kPooled,      // Pooled chunks, already uncommitted and ready for reuse.
-      kNumberOfChunkQueues,
-    };
-
-    template <ChunkQueueType type>
-    void AddMemoryChunkSafe(MemoryChunk* chunk) {
-      base::LockGuard<base::Mutex> guard(&mutex_);
-      chunks_[type].push_back(chunk);
-    }
-
-    template <ChunkQueueType type>
-    MemoryChunk* GetMemoryChunkSafe() {
-      base::LockGuard<base::Mutex> guard(&mutex_);
-      if (chunks_[type].empty()) return nullptr;
-      MemoryChunk* chunk = chunks_[type].front();
-      chunks_[type].pop_front();
-      return chunk;
-    }
-
-    void PerformFreeMemoryOnQueuedChunks();
-
-    base::Mutex mutex_;
-    MemoryAllocator* allocator_;
-    std::list<MemoryChunk*> chunks_[kNumberOfChunkQueues];
-    base::Semaphore pending_unmapping_tasks_semaphore_;
-    intptr_t concurrent_unmapping_tasks_active_;
-
-    friend class MemoryAllocator;
-  };
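Note on the removal above: the core of the Unmapper is a set of per-type chunk queues behind one mutex, with a non-blocking take. A minimal standalone sketch of that pattern, assuming only the C++ standard library (Chunk, ChunkQueues, and the method names here are illustrative, not V8's API):

// Sketch of a mutex-guarded multi-queue chunk pool (illustrative names).
#include <list>
#include <mutex>

struct Chunk {};

class ChunkQueues {
 public:
  enum QueueType { kRegular, kNonRegular, kPooled, kNumQueues };

  // Any thread may add; the mutex serializes access to the queues.
  void Add(QueueType type, Chunk* chunk) {
    std::lock_guard<std::mutex> guard(mutex_);
    queues_[type].push_back(chunk);
  }

  // Non-blocking take: returns nullptr when the queue is empty, mirroring
  // GetMemoryChunkSafe above.
  Chunk* TryGet(QueueType type) {
    std::lock_guard<std::mutex> guard(mutex_);
    if (queues_[type].empty()) return nullptr;
    Chunk* chunk = queues_[type].front();
    queues_[type].pop_front();
    return chunk;
  }

 private:
  std::mutex mutex_;
  std::list<Chunk*> queues_[kNumQueues];
};

TryGetPooledMemoryChunkSafe above composes two such takes: pooled chunks first, then regular chunks that would otherwise have been unmapped.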
-
   enum AllocationMode {
     kRegular,
     kPooled,
   };
-  enum FreeMode {
-    kFull,
-    kPreFreeAndQueue,
-    kPooledAndQueue,
-  };
 
   explicit MemoryAllocator(Isolate* isolate);
 
   // Initializes its internal bookkeeping structures.
   // Max capacity of the total space and executable memory limit.
   bool SetUp(intptr_t max_capacity, intptr_t capacity_executable,
              intptr_t code_range_size);
 
   void TearDown();
 
   // Allocates either Page or NewSpacePage from the allocator. AllocationMode
   // is used to indicate whether pooled allocation, which only works for
   // MemoryChunk::kPageSize, should be tried first.
   template <typename PageType, MemoryAllocator::AllocationMode mode = kRegular,
             typename SpaceType>
   PageType* AllocatePage(intptr_t size, SpaceType* owner,
                          Executability executable);
 
-  template <MemoryAllocator::FreeMode mode = kFull>
+  // PreFree logically frees the object, i.e., it takes care of the size
+  // bookkeeping and calls the allocation callback.
+  void PreFreeMemory(MemoryChunk* chunk);
+
+  // FreeMemory can be called concurrently when PreFree was executed before.
+  void PerformFreeMemory(MemoryChunk* chunk);
+
+  // Free is a wrapper method. For kRegular AllocationMode it calls PreFree and
+  // PerformFreeMemory together. For kPooled it will dispatch to pooled free.
+  template <MemoryAllocator::AllocationMode mode = kRegular>
   void Free(MemoryChunk* chunk);
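Note on the declarations restored above: they split freeing into a bookkeeping phase and a release phase, so the expensive part can safely be deferred or moved off-thread. A hedged sketch of that two-phase shape with toy types (not the V8 implementation):

// Two-phase free: PreFree does synchronous bookkeeping only, so
// PerformFree can legally run later (or, in a concurrent design, on a
// worker thread).
#include <cassert>

struct ToyChunk {
  bool pre_freed = false;
};

void PreFree(ToyChunk* chunk) {
  // Phase 1: update size accounting and mark the chunk logically dead.
  chunk->pre_freed = true;
}

void PerformFree(ToyChunk* chunk) {
  // Phase 2: only legal after PreFree; actually releases the memory.
  assert(chunk->pre_freed);
  delete chunk;
}

void Free(ToyChunk* chunk) {
  PreFree(chunk);
  PerformFree(chunk);  // a concurrent design would queue this instead
}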
 
   // Returns allocated spaces in bytes.
   intptr_t Size() { return size_.Value(); }
 
   // Returns allocated executable spaces in bytes.
   intptr_t SizeExecutable() { return size_executable_.Value(); }
 
   // Returns the maximum available bytes of heaps.
   intptr_t Available() {
(...skipping 85 matching lines...)
     DCHECK_NE(LO_SPACE, space);
     return (space == CODE_SPACE) ? CodePageAreaSize()
                                  : Page::kAllocatableMemory;
   }
 
   MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
                                               Address start, size_t commit_size,
                                               size_t reserved_size);
 
   CodeRange* code_range() { return code_range_; }
-  Unmapper* unmapper() { return &unmapper_; }
 
  private:
-  // PreFree logically frees the object, i.e., it takes care of the size
-  // bookkeeping and calls the allocation callback.
-  void PreFreeMemory(MemoryChunk* chunk);
-
-  // FreeMemory can be called concurrently when PreFree was executed before.
-  void PerformFreeMemory(MemoryChunk* chunk);
-
   // See AllocatePage for public interface. Note that currently we only support
   // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
   template <typename SpaceType>
   MemoryChunk* AllocatePagePooled(SpaceType* owner);
 
+  // Free that chunk into the pool.
+  void FreePooled(MemoryChunk* chunk);
+
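Note: AllocatePagePooled and the restored FreePooled form a simple reuse loop. Freeing a pooled page keeps its reservation, and allocation tries the pool before asking the OS. A minimal sketch under those assumptions (illustrative names and a plain vector; V8 keeps the pool in the List<MemoryChunk*> chunk_pool_ added further down):

// Pool round-trip: free returns a page to the pool, allocate reuses it.
#include <vector>

struct Page {};
static std::vector<Page*> page_pool;

Page* AllocatePage() {
  if (!page_pool.empty()) {
    Page* page = page_pool.back();  // reuse keeps the address reservation,
    page_pool.pop_back();           // so only recommitting is needed
    return page;
  }
  return new Page();  // pool empty: fall back to a fresh allocation
}

void FreePooled(Page* page) {
  page_pool.push_back(page);  // keep the reservation for later reuse
}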
   Isolate* isolate_;
 
   CodeRange* code_range_;
 
   // Maximum space size in bytes.
   intptr_t capacity_;
   // Maximum subset of capacity_ that can be executable
   intptr_t capacity_executable_;
 
   // Allocated space size in bytes.
(...skipping 35 matching lines...)
     // values only if they did not change in between.
     void* ptr = nullptr;
     do {
       ptr = lowest_ever_allocated_.Value();
     } while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low));
     do {
       ptr = highest_ever_allocated_.Value();
     } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
   }
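Note: the retry loops above are a lock-free running min/max: re-read the current bound and attempt a compare-and-swap until the update either lands or becomes unnecessary. The same shape in portable C++ with std::atomic (a sketch; V8's AtomicValue is its own wrapper, not this API):

// Lock-free running min/max with compare-exchange retry loops.
#include <atomic>
#include <cstdint>

std::atomic<std::uintptr_t> lowest{UINTPTR_MAX};
std::atomic<std::uintptr_t> highest{0};

void UpdateLimits(std::uintptr_t low, std::uintptr_t high) {
  // compare_exchange_weak reloads `cur` on failure, so each iteration
  // re-tests against the freshest value published by other threads.
  std::uintptr_t cur = lowest.load();
  while (low < cur && !lowest.compare_exchange_weak(cur, low)) {
  }
  cur = highest.load();
  while (high > cur && !highest.compare_exchange_weak(cur, high)) {
  }
}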
 
+  List<MemoryChunk*> chunk_pool_;
+
   base::VirtualMemory last_chunk_;
-  Unmapper unmapper_;
 
   friend class TestCodeRangeScope;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
 };
 
 
 // -----------------------------------------------------------------------------
 // Interface for heap object iterator to be implemented by all object space
 // object iterators.
(...skipping 1631 matching lines...)
     count = 0;
   }
   // Must be small, since an iteration is used for lookup.
   static const int kMaxComments = 64;
 };
 #endif
 }  // namespace internal
 }  // namespace v8
 
 #endif  // V8_HEAP_SPACES_H_