Chromium Code Reviews

Unified Diff: src/heap/spaces.h

Issue 2395563002: [heap] Use size_t throughout MemoryAllocator (Closed)
Patch Set: Fix compilation (created 4 years, 2 months ago)
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #ifndef V8_HEAP_SPACES_H_
 #define V8_HEAP_SPACES_H_

 #include <list>
 #include <memory>
 #include <unordered_set>
(...skipping 1212 matching lines...)
    base::Semaphore pending_unmapping_tasks_semaphore_;
    intptr_t concurrent_unmapping_tasks_active_;

    friend class MemoryAllocator;
  };

  enum AllocationMode {
    kRegular,
    kPooled,
  };
+
  enum FreeMode {
    kFull,
    kPreFreeAndQueue,
    kPooledAndQueue,
  };

+  static int CodePageGuardStartOffset();

    Michael Lippautz 2016/10/04 19:44:50: Just moved up. size_t'ing these constants is an ex

+
+  static int CodePageGuardSize();
+
+  static int CodePageAreaStartOffset();
+
+  static int CodePageAreaEndOffset();
+
+  static int CodePageAreaSize() {
+    return CodePageAreaEndOffset() - CodePageAreaStartOffset();
+  }
+
+  static int PageAreaSize(AllocationSpace space) {
+    DCHECK_NE(LO_SPACE, space);
+    return (space == CODE_SPACE) ? CodePageAreaSize()
+                                 : Page::kAllocatableMemory;
+  }
+
   explicit MemoryAllocator(Isolate* isolate);

   // Initializes its internal bookkeeping structures.
   // Max capacity of the total space and executable memory limit.
   bool SetUp(intptr_t max_capacity, intptr_t capacity_executable,
              intptr_t code_range_size);

   void TearDown();

   // Allocates a Page from the allocator. AllocationMode is used to indicate
   // whether pooled allocation, which only works for MemoryChunk::kPageSize,
   // should be tried first.
   template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
             typename SpaceType>
   Page* AllocatePage(intptr_t size, SpaceType* owner, Executability executable);

   LargePage* AllocateLargePage(intptr_t size, LargeObjectSpace* owner,
                                Executability executable);

   template <MemoryAllocator::FreeMode mode = kFull>
   void Free(MemoryChunk* chunk);

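AllocationMode and FreeMode together form a simple page pool: an allocation with kPooled first tries to reuse a page previously freed with kPooledAndQueue, skipping a round trip to the OS, while kFull releases the memory immediately. A minimal standalone sketch of that pattern (class and method names are illustrative, not V8's implementation):

    #include <cstddef>
    #include <cstdlib>
    #include <vector>

    class PagePool {
     public:
      // Pooled allocation: reuse a previously freed page when one is
      // available, otherwise fall back to a fresh allocation.
      void* AllocatePage(size_t page_size) {
        if (!pool_.empty()) {
          void* page = pool_.back();
          pool_.pop_back();
          return page;  // analogue of the kPooled fast path
        }
        return std::malloc(page_size);  // analogue of kRegular
      }

      // Analogue of kPooledAndQueue: keep the memory and queue it for reuse.
      void FreeToPool(void* page) { pool_.push_back(page); }

      // Analogue of kFull: return the memory to the system immediately.
      void FreeFully(void* page) { std::free(page); }

     private:
      std::vector<void*> pool_;
    };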
   bool CanFreeMemoryChunk(MemoryChunk* chunk);

   // Returns allocated spaces in bytes.
-  intptr_t Size() { return size_.Value(); }
+  size_t Size() { return size_.Value(); }

   // Returns allocated executable spaces in bytes.
-  intptr_t SizeExecutable() { return size_executable_.Value(); }
+  size_t SizeExecutable() { return size_executable_.Value(); }

   // Returns the maximum available bytes of heaps.
-  intptr_t Available() {
-    intptr_t size = Size();
+  size_t Available() {
+    const size_t size = Size();
     return capacity_ < size ? 0 : capacity_ - size;
   }

   // Returns the maximum available executable bytes of heaps.
-  intptr_t AvailableExecutable() {
-    intptr_t executable_size = SizeExecutable();
+  size_t AvailableExecutable() {
+    const size_t executable_size = SizeExecutable();
     if (capacity_executable_ < executable_size) return 0;
     return capacity_executable_ - executable_size;
   }

   // Returns maximum available bytes that the old space can have.
-  intptr_t MaxAvailable() {
+  size_t MaxAvailable() {
     return (Available() / Page::kPageSize) * Page::kAllocatableMemory;
   }
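The clamping in Available() and AvailableExecutable() matters more after this change: with unsigned size_t arithmetic, a plain capacity_ - size would wrap around to a huge value whenever size exceeded capacity_, instead of going negative as intptr_t would. A minimal standalone sketch of the hazard and the guard (names are illustrative):

    #include <cstddef>
    #include <cstdio>

    // Saturating subtraction: the same pattern Available() uses to avoid
    // unsigned wraparound when the subtrahend may exceed the minuend.
    size_t SaturatingSub(size_t capacity, size_t used) {
      return capacity < used ? 0 : capacity - used;
    }

    int main() {
      size_t capacity = 100, used = 150;
      printf("naive:   %zu\n", capacity - used);                // wraps to SIZE_MAX - 49
      printf("clamped: %zu\n", SaturatingSub(capacity, used));  // prints 0
    }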

   // Returns an indication of whether a pointer is in a space that has
   // been allocated by this MemoryAllocator.
   V8_INLINE bool IsOutsideAllocatedSpace(const void* address) {
     return address < lowest_ever_allocated_.Value() ||
            address >= highest_ever_allocated_.Value();
   }

-#ifdef DEBUG
-  // Reports statistic info of the space.
-  void ReportStatistics();
-#endif
-
   // Returns a MemoryChunk in which the memory region from commit_area_size to
   // reserve_area_size of the chunk area is reserved but not committed, it
   // could be committed later by calling MemoryChunk::CommitArea.
   MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
                              intptr_t commit_area_size,
                              Executability executable, Space* space);

   void ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink);

   Address ReserveAlignedMemory(size_t requested, size_t alignment,
(...skipping 17 matching lines...)
   // Uncommit a contiguous block of memory [start..(start+size)[.
   // start is not NULL, the size is greater than zero, and the
   // block is contained in the initial chunk. Returns true if it succeeded
   // and false otherwise.
   bool UncommitBlock(Address start, size_t size);

   // Zaps a contiguous block of memory [start..(start+size)[ thus
   // filling it up with a recognizable non-NULL bit pattern.
   void ZapBlock(Address start, size_t size);

-  static int CodePageGuardStartOffset();
-
-  static int CodePageGuardSize();
-
-  static int CodePageAreaStartOffset();
-
-  static int CodePageAreaEndOffset();
-
-  static int CodePageAreaSize() {
-    return CodePageAreaEndOffset() - CodePageAreaStartOffset();
-  }
-
-  static int PageAreaSize(AllocationSpace space) {
-    DCHECK_NE(LO_SPACE, space);
-    return (space == CODE_SPACE) ? CodePageAreaSize()
-                                 : Page::kAllocatableMemory;
-  }
-
   MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
                                               Address start, size_t commit_size,
                                               size_t reserved_size);

   CodeRange* code_range() { return code_range_; }
   Unmapper* unmapper() { return &unmapper_; }

+#ifdef DEBUG
+  // Reports statistic info of the space.
+  void ReportStatistics();
+#endif
+
  private:
   // PreFree logically frees the object, i.e., it takes care of the size
   // bookkeeping and calls the allocation callback.
   void PreFreeMemory(MemoryChunk* chunk);

   // FreeMemory can be called concurrently when PreFree was executed before.
   void PerformFreeMemory(MemoryChunk* chunk);

   // See AllocatePage for public interface. Note that currently we only support
   // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
   template <typename SpaceType>
   MemoryChunk* AllocatePagePooled(SpaceType* owner);

-  Isolate* isolate_;
-
-  CodeRange* code_range_;
-
-  // Maximum space size in bytes.
-  intptr_t capacity_;
-  // Maximum subset of capacity_ that can be executable
-  intptr_t capacity_executable_;
-
-  // Allocated space size in bytes.
-  base::AtomicNumber<intptr_t> size_;
-  // Allocated executable space size in bytes.
-  base::AtomicNumber<intptr_t> size_executable_;
-
-  // We keep the lowest and highest addresses allocated as a quick way
-  // of determining that pointers are outside the heap. The estimate is
-  // conservative, i.e. not all addrsses in 'allocated' space are allocated
-  // to our heap. The range is [lowest, highest[, inclusive on the low end
-  // and exclusive on the high end.
-  base::AtomicValue<void*> lowest_ever_allocated_;
-  base::AtomicValue<void*> highest_ever_allocated_;
-
   // Initializes pages in a chunk. Returns the first page address.
   // This function and GetChunkId() are provided for the mark-compact
   // collector to rebuild page headers in the from space, which is
   // used as a marking stack and its page headers are destroyed.
   Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                PagedSpace* owner);

   void UpdateAllocatedSpaceLimits(void* low, void* high) {
     // The use of atomic primitives does not guarantee correctness (wrt.
     // desired semantics) by default. The loop here ensures that we update the
     // values only if they did not change in between.
     void* ptr = nullptr;
     do {
       ptr = lowest_ever_allocated_.Value();
     } while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low));
     do {
       ptr = highest_ever_allocated_.Value();
     } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
   }
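UpdateAllocatedSpaceLimits is the standard compare-and-swap retry loop for maintaining monotonic bounds under concurrency: a plain store could lose a racing update, so each iteration re-reads the current bound and only installs the new value if it still improves on it and no other thread changed it in between. A standalone sketch of the same pattern using std::atomic (illustrative; the real code uses V8's base::AtomicValue, as shown above):

    #include <atomic>
    #include <cstdint>

    std::atomic<uintptr_t> lowest{UINTPTR_MAX};
    std::atomic<uintptr_t> highest{0};

    // Atomically widen [lowest, highest) to cover [low, high). On failure,
    // compare_exchange_weak reloads the freshly observed bound into cur,
    // and the loop re-tests whether the update is still needed.
    void UpdateLimits(uintptr_t low, uintptr_t high) {
      uintptr_t cur = lowest.load(std::memory_order_relaxed);
      while (low < cur && !lowest.compare_exchange_weak(cur, low)) {
      }
      cur = highest.load(std::memory_order_relaxed);
      while (high > cur && !highest.compare_exchange_weak(cur, high)) {
      }
    }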

+  Isolate* isolate_;
+  CodeRange* code_range_;
+
+  // Maximum space size in bytes.
+  size_t capacity_;
+  // Maximum subset of capacity_ that can be executable
+  size_t capacity_executable_;
+
+  // Allocated space size in bytes.
+  base::AtomicNumber<size_t> size_;
+  // Allocated executable space size in bytes.
+  base::AtomicNumber<size_t> size_executable_;
+
+  // We keep the lowest and highest addresses allocated as a quick way
+  // of determining that pointers are outside the heap. The estimate is
+  // conservative, i.e. not all addresses in 'allocated' space are allocated
+  // to our heap. The range is [lowest, highest[, inclusive on the low end
+  // and exclusive on the high end.
+  base::AtomicValue<void*> lowest_ever_allocated_;
+  base::AtomicValue<void*> highest_ever_allocated_;
+
   base::VirtualMemory last_chunk_;
   Unmapper unmapper_;

   friend class TestCodeRangeScope;

   DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
 };


 // -----------------------------------------------------------------------------
(...skipping 1521 matching lines...)
   PageIterator old_iterator_;
   PageIterator code_iterator_;
   PageIterator map_iterator_;
   LargePageIterator lo_iterator_;
 };

 }  // namespace internal
 }  // namespace v8

 #endif  // V8_HEAP_SPACES_H_