| OLD | NEW |
| 1 // Copyright 2006-2010 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2010 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 16 matching lines...) |
| 27 | 27 |
| 28 #ifndef V8_SPACES_H_ | 28 #ifndef V8_SPACES_H_ |
| 29 #define V8_SPACES_H_ | 29 #define V8_SPACES_H_ |
| 30 | 30 |
| 31 #include "list-inl.h" | 31 #include "list-inl.h" |
| 32 #include "log.h" | 32 #include "log.h" |
| 33 | 33 |
| 34 namespace v8 { | 34 namespace v8 { |
| 35 namespace internal { | 35 namespace internal { |
| 36 | 36 |
| 37 class Isolate; |
| 38 |
| 37 // ----------------------------------------------------------------------------- | 39 // ----------------------------------------------------------------------------- |
| 38 // Heap structures: | 40 // Heap structures: |
| 39 // | 41 // |
| 40 // A JS heap consists of a young generation, an old generation, and a large | 42 // A JS heap consists of a young generation, an old generation, and a large |
| 41 // object space. The young generation is divided into two semispaces. A | 43 // object space. The young generation is divided into two semispaces. A |
| 42 // scavenger implements Cheney's copying algorithm. The old generation is | 44 // scavenger implements Cheney's copying algorithm. The old generation is |
| 43 // separated into a map space and an old object space. The map space contains | 45 // separated into a map space and an old object space. The map space contains |
| 44 // all (and only) map objects; the rest of the old objects go into the old space. | 46 // all (and only) map objects; the rest of the old objects go into the old space. |
| 45 // The old generation is collected by a mark-sweep-compact collector. | 47 // The old generation is collected by a mark-sweep-compact collector. |
| 46 // | 48 // |
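
A note for readers new to this file: each region named here is identified by an AllocationSpace value (this diff itself references NEW_SPACE, MAP_SPACE, and FIRST_SPACE). A minimal sketch of the taxonomy, with hypothetical member names rather than the real enum:

```cpp
// Minimal sketch only; the real AllocationSpace enum lives elsewhere in V8
// and has more members. Names below mirror the comment, not the actual list.
enum AllocationSpaceSketch {
  NEW_SPACE_ID,   // young generation: a pair of semispaces (scavenger)
  OLD_SPACE_ID,   // old generation: ordinary old objects (mark-sweep-compact)
  MAP_SPACE_ID,   // old generation: all (and only) map objects
  LO_SPACE_ID     // large object space: big objects, one chunk each
};

// Routing rule implied by the comment: maps go to the map space, every
// other old object goes to the old space.
inline AllocationSpaceSketch SpaceForOldObject(bool is_map) {
  return is_map ? MAP_SPACE_ID : OLD_SPACE_ID;
}
```
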
| (...skipping 187 matching lines...) |
| 234 Address end, | 236 Address end, |
| 235 bool reaches_limit); | 237 bool reaches_limit); |
| 236 | 238 |
| 237 // Page size in bytes. This must be a multiple of the OS page size. | 239 // Page size in bytes. This must be a multiple of the OS page size. |
| 238 static const int kPageSize = 1 << kPageSizeBits; | 240 static const int kPageSize = 1 << kPageSizeBits; |
| 239 | 241 |
| 240 // Page size mask. | 242 // Page size mask. |
| 241 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1; | 243 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1; |
| 242 | 244 |
| 243 static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize + | 245 static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize + |
| 244 kIntSize + kPointerSize; | 246 kIntSize + kPointerSize + kPointerSize; |
| 245 | 247 |
| 246 // The start offset of the object area in a page. Aligned to both maps and | 248 // The start offset of the object area in a page. Aligned to both maps and |
| 247 // code alignment to be suitable for both. | 249 // code alignment to be suitable for both. |
| 248 static const int kObjectStartOffset = | 250 static const int kObjectStartOffset = |
| 249 CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kPageHeaderSize)); | 251 CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kPageHeaderSize)); |
| 250 | 252 |
| 251 // Object area size in bytes. | 253 // Object area size in bytes. |
| 252 static const int kObjectAreaSize = kPageSize - kObjectStartOffset; | 254 static const int kObjectAreaSize = kPageSize - kObjectStartOffset; |
| 253 | 255 |
| 254 // Maximum object size that fits in a page. | 256 // Maximum object size that fits in a page. |
| (...skipping 24 matching lines...) |
| 279 // flag at the beginning of the next scavenge and each page becomes marked as | 281 // flag at the beginning of the next scavenge and each page becomes marked as |
| 280 // having a valid watermark. | 282 // having a valid watermark. |
| 281 // | 283 // |
| 282 // The following invariant must hold for pages in old pointer and map spaces: | 284 // The following invariant must hold for pages in old pointer and map spaces: |
| 283 // If page is in use then page is marked as having invalid watermark at | 285 // If page is in use then page is marked as having invalid watermark at |
| 284 // the beginning and at the end of any GC. | 286 // the beginning and at the end of any GC. |
| 285 // | 287 // |
| 286 // This invariant guarantees that after flipping flag meaning at the | 288 // This invariant guarantees that after flipping flag meaning at the |
| 287 // beginning of scavenge all pages in use will be marked as having valid | 289 // beginning of scavenge all pages in use will be marked as having valid |
| 288 // watermark. | 290 // watermark. |
| 289 static inline void FlipMeaningOfInvalidatedWatermarkFlag(); | 291 static inline void FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap); |
| 290 | 292 |
| 291 // Returns true if the page allocation watermark was not altered during | 293 // Returns true if the page allocation watermark was not altered during |
| 292 // scavenge. | 294 // scavenge. |
| 293 inline bool IsWatermarkValid(); | 295 inline bool IsWatermarkValid(); |
| 294 | 296 |
| 295 inline void InvalidateWatermark(bool value); | 297 inline void InvalidateWatermark(bool value); |
| 296 | 298 |
| 297 inline bool GetPageFlag(PageFlag flag); | 299 inline bool GetPageFlag(PageFlag flag); |
| 298 inline void SetPageFlag(PageFlag flag, bool value); | 300 inline void SetPageFlag(PageFlag flag, bool value); |
| 299 inline void ClearPageFlags(); | 301 inline void ClearPageFlags(); |
| 300 | 302 |
| 301 inline void ClearGCFields(); | 303 inline void ClearGCFields(); |
| 302 | 304 |
| 303 static const int kAllocationWatermarkOffsetShift = WATERMARK_INVALIDATED + 1; | 305 static const int kAllocationWatermarkOffsetShift = WATERMARK_INVALIDATED + 1; |
| 304 static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1; | 306 static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1; |
| 305 static const uint32_t kAllocationWatermarkOffsetMask = | 307 static const uint32_t kAllocationWatermarkOffsetMask = |
| 306 ((1 << kAllocationWatermarkOffsetBits) - 1) << | 308 ((1 << kAllocationWatermarkOffsetBits) - 1) << |
| 307 kAllocationWatermarkOffsetShift; | 309 kAllocationWatermarkOffsetShift; |
| 308 | 310 |
| 309 static const uint32_t kFlagsMask = | 311 static const uint32_t kFlagsMask = |
| 310 ((1 << kAllocationWatermarkOffsetShift) - 1); | 312 ((1 << kAllocationWatermarkOffsetShift) - 1); |
| 311 | 313 |
| 312 STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >= | 314 STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >= |
| 313 kAllocationWatermarkOffsetBits); | 315 kAllocationWatermarkOffsetBits); |
| 314 | 316 |
| 315 // This field contains the meaning of the WATERMARK_INVALIDATED flag. | |
| 316 // Instead of clearing this flag from all pages we just flip | |
| 317 // its meaning at the beginning of a scavenge. | |
| 318 static intptr_t watermark_invalidated_mark_; | |
| 319 | |
| 320 //--------------------------------------------------------------------------- | 317 //--------------------------------------------------------------------------- |
| 321 // Page header description. | 318 // Page header description. |
| 322 // | 319 // |
| 323 // If a page is not in the large object space, the first word, | 320 // If a page is not in the large object space, the first word, |
| 324 // opaque_header, encodes the next page address (aligned to kPageSize 8K) | 321 // opaque_header, encodes the next page address (aligned to kPageSize 8K) |
| 325 // and the chunk number (0 ~ 8K-1). Only MemoryAllocator should use | 322 // and the chunk number (0 ~ 8K-1). Only MemoryAllocator should use |
| 326 // opaque_header. The value range of the opaque_header is [0..kPageSize[, | 323 // opaque_header. The value range of the opaque_header is [0..kPageSize[, |
| 327 // or [next_page_start, next_page_end[. It cannot point to a valid address | 324 // or [next_page_start, next_page_end[. It cannot point to a valid address |
| 328 // in the current page. If a page is in the large object space, the first | 325 // in the current page. If a page is in the large object space, the first |
| 329 // word *may* (if the page start and large object chunk start are the | 326 // word *may* (if the page start and large object chunk start are the |
| (...skipping 16 matching lines...) |
| 346 uint32_t dirty_regions_; | 343 uint32_t dirty_regions_; |
| 347 | 344 |
| 348 // The index of the page in its owner space. | 345 // The index of the page in its owner space. |
| 349 int mc_page_index; | 346 int mc_page_index; |
| 350 | 347 |
| 351 // During mark-compact collections this field contains the forwarding address | 348 // During mark-compact collections this field contains the forwarding address |
| 352 // of the first live object in this page. | 349 // of the first live object in this page. |
| 353 // During scavenge collections this field is used to store the allocation | 350 // During scavenge collections this field is used to store the allocation |
| 354 // watermark if it is altered during scavenge. | 351 // watermark if it is altered during scavenge. |
| 355 Address mc_first_forwarded; | 352 Address mc_first_forwarded; |
| 353 |
| 354 Heap* heap_; |
| 356 }; | 355 }; |
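
Two details above reward a closer look: pages are 8K and kPageSizeBits-aligned, so any interior address can be mapped back to its page by masking, and the allocation watermark is stored as an in-page offset shifted past the flag bits (kAllocationWatermarkOffsetShift/Mask). A self-contained sketch of both encodings, assuming kPageSizeBits == 13 as the 8K figures in the comments imply:

```cpp
#include <cassert>
#include <cstdint>

const int kPageSizeBits = 13;                                   // 8K pages
const uintptr_t kPageAlignmentMask = (uintptr_t(1) << kPageSizeBits) - 1;
const int kWatermarkShift = 4;  // stand-in for kAllocationWatermarkOffsetShift

// Page::FromAddress-style lookup: mask off the low bits of any address
// inside the page to recover the page start.
uintptr_t PageStart(uintptr_t addr) { return addr & ~kPageAlignmentMask; }

// Watermark encoding: keep the in-page offset in the high bits of the
// flags word, leaving the low bits free for page flags.
uint32_t EncodeWatermark(uint32_t flags, uint32_t offset) {
  return (flags & ((1u << kWatermarkShift) - 1)) | (offset << kWatermarkShift);
}
uint32_t DecodeWatermark(uint32_t flags) { return flags >> kWatermarkShift; }

int main() {
  uintptr_t page = uintptr_t(1) << 20;      // an 8K-aligned page start
  assert(PageStart(page + 0x123) == page);  // interior address -> page
  uint32_t flags = EncodeWatermark(0x5, 0x240);
  assert(DecodeWatermark(flags) == 0x240);  // offset survives the round trip
  assert((flags & 0xF) == 0x5);             // flag bits untouched
  return 0;
}
```
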
| 357 | 356 |
| 358 | 357 |
| 359 // ---------------------------------------------------------------------------- | 358 // ---------------------------------------------------------------------------- |
| 360 // Space is the abstract superclass for all allocation spaces. | 359 // Space is the abstract superclass for all allocation spaces. |
| 361 class Space : public Malloced { | 360 class Space : public Malloced { |
| 362 public: | 361 public: |
| 363 Space(AllocationSpace id, Executability executable) | 362 Space(Heap* heap, AllocationSpace id, Executability executable) |
| 364 : id_(id), executable_(executable) {} | 363 : heap_(heap), id_(id), executable_(executable) {} |
| 365 | 364 |
| 366 virtual ~Space() {} | 365 virtual ~Space() {} |
| 367 | 366 |
| 367 Heap* heap() const { return heap_; } |
| 368 |
| 368 // Does the space need executable memory? | 369 // Does the space need executable memory? |
| 369 Executability executable() { return executable_; } | 370 Executability executable() { return executable_; } |
| 370 | 371 |
| 371 // Identity used in error reporting. | 372 // Identity used in error reporting. |
| 372 AllocationSpace identity() { return id_; } | 373 AllocationSpace identity() { return id_; } |
| 373 | 374 |
| 374 // Returns allocated size. | 375 // Returns allocated size. |
| 375 virtual intptr_t Size() = 0; | 376 virtual intptr_t Size() = 0; |
| 376 | 377 |
| 377 // Returns size of objects. Can differ from the allocated size | 378 // Returns size of objects. Can differ from the allocated size |
| (...skipping 12 matching lines...) |
| 390 | 391 |
| 391 // After calling this we can allocate a certain number of bytes using only | 392 // After calling this we can allocate a certain number of bytes using only |
| 392 // linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope) | 393 // linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope) |
| 393 // without using freelists or causing a GC. This is used by partial | 394 // without using freelists or causing a GC. This is used by partial |
| 394 // snapshots. It returns true if space was reserved or false if a GC is | 395 // snapshots. It returns true if space was reserved or false if a GC is |
| 395 // needed. For paged spaces the space requested must include the space wasted | 396 // needed. For paged spaces the space requested must include the space wasted |
| 396 // at the end of each page when allocating linearly. | 397 // at the end of each page when allocating linearly. |
| 397 virtual bool ReserveSpace(int bytes) = 0; | 398 virtual bool ReserveSpace(int bytes) = 0; |
| 398 | 399 |
| 399 private: | 400 private: |
| 401 Heap* heap_; |
| 400 AllocationSpace id_; | 402 AllocationSpace id_; |
| 401 Executability executable_; | 403 Executability executable_; |
| 402 }; | 404 }; |
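
This constructor change is the heart of the CL: Space used to carry effectively global state, and now every space holds a Heap* that it exposes via heap(). A toy sketch of the same dependency-injection move, with hypothetical stand-in types:

```cpp
#include <iostream>

// Hypothetical stand-ins for Heap and Space; a sketch of the pattern only.
class HeapSketch {
 public:
  explicit HeapSketch(int id) : id_(id) {}
  int id() const { return id_; }
 private:
  int id_;
};

class SpaceSketch {
 public:
  explicit SpaceSketch(HeapSketch* heap) : heap_(heap) {}
  HeapSketch* heap() const { return heap_; }  // as in Space::heap() above
 private:
  HeapSketch* heap_;  // one pointer per space instead of static globals
};

int main() {
  HeapSketch heap1(1), heap2(2);      // two isolates, two heaps
  SpaceSketch a(&heap1), b(&heap2);   // spaces no longer share state
  std::cout << a.heap()->id() << " " << b.heap()->id() << "\n";  // 1 2
  return 0;
}
```
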
| 403 | 405 |
| 404 | 406 |
| 405 // ---------------------------------------------------------------------------- | 407 // ---------------------------------------------------------------------------- |
| 406 // All heap objects containing executable code (code objects) must be allocated | 408 // All heap objects containing executable code (code objects) must be allocated |
| 407 // from a 2 GB range of memory, so that they can call each other using 32-bit | 409 // from a 2 GB range of memory, so that they can call each other using 32-bit |
| 408 // displacements. This happens automatically on 32-bit platforms, where 32-bit | 410 // displacements. This happens automatically on 32-bit platforms, where 32-bit |
| 409 // displacements cover the entire 4GB virtual address space. On 64-bit | 411 // displacements cover the entire 4GB virtual address space. On 64-bit |
| 410 // platforms, we support this using the CodeRange object, which reserves and | 412 // platforms, we support this using the CodeRange object, which reserves and |
| 411 // manages a range of virtual memory. | 413 // manages a range of virtual memory. |
| 412 class CodeRange : public AllStatic { | 414 class CodeRange { |
| 413 public: | 415 public: |
| 414 // Reserves a range of virtual memory, but does not commit any of it. | 416 // Reserves a range of virtual memory, but does not commit any of it. |
| 415 // Can only be called once, at heap initialization time. | 417 // Can only be called once, at heap initialization time. |
| 416 // Returns false on failure. | 418 // Returns false on failure. |
| 417 static bool Setup(const size_t requested_size); | 419 bool Setup(const size_t requested_size); |
| 418 | 420 |
| 419 // Frees the range of virtual memory, and frees the data structures used to | 421 // Frees the range of virtual memory, and frees the data structures used to |
| 420 // manage it. | 422 // manage it. |
| 421 static void TearDown(); | 423 void TearDown(); |
| 422 | 424 |
| 423 static bool exists() { return code_range_ != NULL; } | 425 bool exists() { return code_range_ != NULL; } |
| 424 static bool contains(Address address) { | 426 bool contains(Address address) { |
| 425 if (code_range_ == NULL) return false; | 427 if (code_range_ == NULL) return false; |
| 426 Address start = static_cast<Address>(code_range_->address()); | 428 Address start = static_cast<Address>(code_range_->address()); |
| 427 return start <= address && address < start + code_range_->size(); | 429 return start <= address && address < start + code_range_->size(); |
| 428 } | 430 } |
| 429 | 431 |
| 430 // Allocates a chunk of memory from the large-object portion of | 432 // Allocates a chunk of memory from the large-object portion of |
| 431 // the code range. On platforms with no separate code range, should | 433 // the code range. On platforms with no separate code range, should |
| 432 // not be called. | 434 // not be called. |
| 433 MUST_USE_RESULT static void* AllocateRawMemory(const size_t requested, | 435 MUST_USE_RESULT void* AllocateRawMemory(const size_t requested, |
| 434 size_t* allocated); | 436 size_t* allocated); |
| 435 static void FreeRawMemory(void* buf, size_t length); | 437 void FreeRawMemory(void* buf, size_t length); |
| 436 | 438 |
| 437 private: | 439 private: |
| 440 CodeRange(); |
| 441 |
| 438 // The reserved range of virtual memory that all code objects are put in. | 442 // The reserved range of virtual memory that all code objects are put in. |
| 439 static VirtualMemory* code_range_; | 443 VirtualMemory* code_range_; |
| 440 // Plain old data class, just a struct plus a constructor. | 444 // Plain old data class, just a struct plus a constructor. |
| 441 class FreeBlock { | 445 class FreeBlock { |
| 442 public: | 446 public: |
| 443 FreeBlock(Address start_arg, size_t size_arg) | 447 FreeBlock(Address start_arg, size_t size_arg) |
| 444 : start(start_arg), size(size_arg) {} | 448 : start(start_arg), size(size_arg) {} |
| 445 FreeBlock(void* start_arg, size_t size_arg) | 449 FreeBlock(void* start_arg, size_t size_arg) |
| 446 : start(static_cast<Address>(start_arg)), size(size_arg) {} | 450 : start(static_cast<Address>(start_arg)), size(size_arg) {} |
| 447 | 451 |
| 448 Address start; | 452 Address start; |
| 449 size_t size; | 453 size_t size; |
| 450 }; | 454 }; |
| 451 | 455 |
| 452 // Freed blocks of memory are added to the free list. When the allocation | 456 // Freed blocks of memory are added to the free list. When the allocation |
| 453 // list is exhausted, the free list is sorted and merged to make the new | 457 // list is exhausted, the free list is sorted and merged to make the new |
| 454 // allocation list. | 458 // allocation list. |
| 455 static List<FreeBlock> free_list_; | 459 List<FreeBlock> free_list_; |
| 456 // Memory is allocated from the free blocks on the allocation list. | 460 // Memory is allocated from the free blocks on the allocation list. |
| 457 // The block at current_allocation_block_index_ is the current block. | 461 // The block at current_allocation_block_index_ is the current block. |
| 458 static List<FreeBlock> allocation_list_; | 462 List<FreeBlock> allocation_list_; |
| 459 static int current_allocation_block_index_; | 463 int current_allocation_block_index_; |
| 460 | 464 |
| 461 // Finds a block on the allocation list that contains at least the | 465 // Finds a block on the allocation list that contains at least the |
| 462 // requested amount of memory. If none is found, sorts and merges | 466 // requested amount of memory. If none is found, sorts and merges |
| 463 // the existing free memory blocks, and searches again. | 467 // the existing free memory blocks, and searches again. |
| 464 // If none can be found, terminates V8 with FatalProcessOutOfMemory. | 468 // If none can be found, terminates V8 with FatalProcessOutOfMemory. |
| 465 static void GetNextAllocationBlock(size_t requested); | 469 void GetNextAllocationBlock(size_t requested); |
| 466 // Compares the start addresses of two free blocks. | 470 // Compares the start addresses of two free blocks. |
| 467 static int CompareFreeBlockAddress(const FreeBlock* left, | 471 static int CompareFreeBlockAddress(const FreeBlock* left, |
| 468 const FreeBlock* right); | 472 const FreeBlock* right); |
| 473 |
| 474 friend class Isolate; |
| 475 |
| 476 Isolate* isolate_; |
| 477 |
| 478 DISALLOW_COPY_AND_ASSIGN(CodeRange); |
| 469 }; | 479 }; |
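
CodeRange is the clearest example of this CL's pattern: an AllStatic class becomes a plain instance class with a private constructor, DISALLOW_COPY_AND_ASSIGN, and a friend class Isolate that constructs it. A hedged sketch of what call sites look like before and after (the isolate accessor name is an assumption, not confirmed by this diff):

```cpp
// Hypothetical call sites; accessor names are assumptions, not the real API.
//
// Before this patch (process-global state):
//   if (CodeRange::exists() && CodeRange::contains(addr)) { ... }
//
// After this patch (per-isolate instance, owned and built by Isolate):
//   CodeRange* range = isolate->code_range();
//   if (range->exists() && range->contains(addr)) { ... }
```
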
| 470 | 480 |
| 471 | 481 |
| 472 // ---------------------------------------------------------------------------- | 482 // ---------------------------------------------------------------------------- |
| 473 // A space acquires chunks of memory from the operating system. The memory | 483 // A space acquires chunks of memory from the operating system. The memory |
| 474 // allocator manages chunks for the paged heap spaces (old space and map | 484 // allocator manages chunks for the paged heap spaces (old space and map |
| 475 // space). A paged chunk consists of pages. Pages in a chunk have contiguous | 485 // space). A paged chunk consists of pages. Pages in a chunk have contiguous |
| 476 // addresses and are linked as a list. | 486 // addresses and are linked as a list. |
| 477 // | 487 // |
| 478 // The allocator keeps an initial chunk which is used for the new space. The | 488 // The allocator keeps an initial chunk which is used for the new space. The |
| 479 // leftover regions of the initial chunk are used for the initial chunks of | 489 // leftover regions of the initial chunk are used for the initial chunks of |
| 480 // old space and map space if they are big enough to hold at least one page. | 490 // old space and map space if they are big enough to hold at least one page. |
| 481 // The allocator assumes that there is one old space and one map space, each | 491 // The allocator assumes that there is one old space and one map space, each |
| 482 // expands the space by allocating kPagesPerChunk pages except the last | 492 // expands the space by allocating kPagesPerChunk pages except the last |
| 483 // expansion (before running out of space). The first chunk may contain fewer | 493 // expansion (before running out of space). The first chunk may contain fewer |
| 484 // than kPagesPerChunk pages as well. | 494 // than kPagesPerChunk pages as well. |
| 485 // | 495 // |
| 486 // The memory allocator also allocates chunks for the large object space, but | 496 // The memory allocator also allocates chunks for the large object space, but |
| 487 // they are managed by the space itself. The new space does not expand. | 497 // they are managed by the space itself. The new space does not expand. |
| 488 // | 498 // |
| 489 // The fact that pages for paged spaces are allocated and deallocated in chunks | 499 // The fact that pages for paged spaces are allocated and deallocated in chunks |
| 490 // induces a constraint on the order of pages in a linked list. We say that | 500 // induces a constraint on the order of pages in a linked list. We say that |
| 491 // pages are linked in the chunk-order if and only if every two consecutive | 501 // pages are linked in the chunk-order if and only if every two consecutive |
| 492 // pages from the same chunk are consecutive in the linked list. | 502 // pages from the same chunk are consecutive in the linked list. |
| 493 // | 503 // |
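
The chunk-order invariant defined above is easy to state as a predicate: every chunk's pages must form a single contiguous run in the page list. A small self-contained checker over a toy encoding (one int chunk id per page, ids assumed below 1024; this is not the real Page API):

```cpp
#include <cstddef>
#include <vector>

// Pages are "linked in chunk-order" iff each chunk's pages form exactly one
// contiguous run in the list. Input: the owning chunk id of each page.
bool IsChunkOrdered(const std::vector<int>& chunk_ids) {
  std::vector<bool> closed(1024, false);  // chunk id -> run already ended?
  for (size_t i = 1; i < chunk_ids.size(); ++i) {
    int id = chunk_ids[i];
    if (chunk_ids[i - 1] != id) {
      if (closed[id]) return false;       // chunk re-appears: order violated
      closed[chunk_ids[i - 1]] = true;    // previous chunk's run just ended
    }
  }
  return true;
}
// IsChunkOrdered({1, 1, 2, 2}) -> true; IsChunkOrdered({1, 2, 1}) -> false.
```
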
| 494 | 504 |
| 495 | 505 |
| 496 class MemoryAllocator : public AllStatic { | 506 class MemoryAllocator { |
| 497 public: | 507 public: |
| 498 // Initializes its internal bookkeeping structures. | 508 // Initializes its internal bookkeeping structures. |
| 499 // Takes the max capacity of the total space and the executable memory limit. | 509 // Takes the max capacity of the total space and the executable memory limit. |
| 500 static bool Setup(intptr_t max_capacity, intptr_t capacity_executable); | 510 bool Setup(intptr_t max_capacity, intptr_t capacity_executable); |
| 501 | 511 |
| 502 // Deletes valid chunks. | 512 // Deletes valid chunks. |
| 503 static void TearDown(); | 513 void TearDown(); |
| 504 | 514 |
| 505 // Reserves an initial address range of virtual memory to be split between | 515 // Reserves an initial address range of virtual memory to be split between |
| 506 // the two new space semispaces, the old space, and the map space. The | 516 // the two new space semispaces, the old space, and the map space. The |
| 507 // memory is not yet committed or assigned to spaces and split into pages. | 517 // memory is not yet committed or assigned to spaces and split into pages. |
| 508 // The initial chunk is unmapped when the memory allocator is torn down. | 518 // The initial chunk is unmapped when the memory allocator is torn down. |
| 509 // This function should only be called when there is not already a reserved | 519 // This function should only be called when there is not already a reserved |
| 510 // initial chunk (initial_chunk_ should be NULL). It returns the start | 520 // initial chunk (initial_chunk_ should be NULL). It returns the start |
| 511 // address of the initial chunk if successful, with the side effect of | 521 // address of the initial chunk if successful, with the side effect of |
| 512 // setting the initial chunk, or else NULL if unsuccessful, leaving the | 522 // setting the initial chunk, or else NULL if unsuccessful, leaving the |
| 513 // initial chunk NULL. | 523 // initial chunk NULL. |
| 514 static void* ReserveInitialChunk(const size_t requested); | 524 void* ReserveInitialChunk(const size_t requested); |
| 515 | 525 |
| 516 // Commits pages from an as-yet-unmanaged block of virtual memory into a | 526 // Commits pages from an as-yet-unmanaged block of virtual memory into a |
| 517 // paged space. The block should be part of the initial chunk reserved via | 527 // paged space. The block should be part of the initial chunk reserved via |
| 518 // a call to ReserveInitialChunk. The number of pages is always returned in | 528 // a call to ReserveInitialChunk. The number of pages is always returned in |
| 519 // the output parameter num_pages. This function assumes that the start | 529 // the output parameter num_pages. This function assumes that the start |
| 520 // address is non-null and that it is big enough to hold at least one | 530 // address is non-null and that it is big enough to hold at least one |
| 521 // page-aligned page. The call always succeeds, and num_pages is always | 531 // page-aligned page. The call always succeeds, and num_pages is always |
| 522 // greater than zero. | 532 // greater than zero. |
| 523 static Page* CommitPages(Address start, size_t size, PagedSpace* owner, | 533 Page* CommitPages(Address start, size_t size, PagedSpace* owner, |
| 524 int* num_pages); | 534 int* num_pages); |
| 525 | 535 |
| 526 // Commit a contiguous block of memory from the initial chunk. Assumes that | 536 // Commit a contiguous block of memory from the initial chunk. Assumes that |
| 527 // the address is not NULL, the size is greater than zero, and that the | 537 // the address is not NULL, the size is greater than zero, and that the |
| 528 // block is contained in the initial chunk. Returns true if it succeeded | 538 // block is contained in the initial chunk. Returns true if it succeeded |
| 529 // and false otherwise. | 539 // and false otherwise. |
| 530 static bool CommitBlock(Address start, size_t size, Executability executable); | 540 bool CommitBlock(Address start, size_t size, Executability executable); |
| 531 | 541 |
| 532 // Uncommit a contiguous block of memory [start..(start+size)[. | 542 // Uncommit a contiguous block of memory [start..(start+size)[. |
| 533 // start is not NULL, the size is greater than zero, and the | 543 // start is not NULL, the size is greater than zero, and the |
| 534 // block is contained in the initial chunk. Returns true if it succeeded | 544 // block is contained in the initial chunk. Returns true if it succeeded |
| 535 // and false otherwise. | 545 // and false otherwise. |
| 536 static bool UncommitBlock(Address start, size_t size); | 546 bool UncommitBlock(Address start, size_t size); |
| 537 | 547 |
| 538 // Zaps a contiguous block of memory [start..(start+size)[ thus | 548 // Zaps a contiguous block of memory [start..(start+size)[ thus |
| 539 // filling it up with a recognizable non-NULL bit pattern. | 549 // filling it up with a recognizable non-NULL bit pattern. |
| 540 static void ZapBlock(Address start, size_t size); | 550 void ZapBlock(Address start, size_t size); |
| 541 | 551 |
| 542 // Attempts to allocate the requested (non-zero) number of pages from the | 552 // Attempts to allocate the requested (non-zero) number of pages from the |
| 543 // OS. Fewer pages might be allocated than requested. If it fails to | 553 // OS. Fewer pages might be allocated than requested. If it fails to |
| 544 // allocate memory for the OS or cannot allocate a single page, this | 554 // allocate memory for the OS or cannot allocate a single page, this |
| 545 // function returns an invalid page pointer (NULL). The caller must check | 555 // function returns an invalid page pointer (NULL). The caller must check |
| 546 // whether the returned page is valid (by calling Page::is_valid()). It is | 556 // whether the returned page is valid (by calling Page::is_valid()). It is |
| 547 // guaranteed that allocated pages have contiguous addresses. The actual | 557 // guaranteed that allocated pages have contiguous addresses. The actual |
| 548 // number of allocated pages is returned in the output parameter | 558 // number of allocated pages is returned in the output parameter |
| 549 // allocated_pages. If the PagedSpace owner is executable and there is | 559 // allocated_pages. If the PagedSpace owner is executable and there is |
| 550 // a code range, the pages are allocated from the code range. | 560 // a code range, the pages are allocated from the code range. |
| 551 static Page* AllocatePages(int requested_pages, int* allocated_pages, | 561 Page* AllocatePages(int requested_pages, int* allocated_pages, |
| 552 PagedSpace* owner); | 562 PagedSpace* owner); |
| 553 | 563 |
| 554 // Frees pages from a given page and after. Requires pages to be | 564 // Frees pages from a given page and after. Requires pages to be |
| 555 // linked in chunk-order (see comment for class). | 565 // linked in chunk-order (see comment for class). |
| 556 // If 'p' is the first page of a chunk, pages from 'p' are freed | 566 // If 'p' is the first page of a chunk, pages from 'p' are freed |
| 557 // and this function returns an invalid page pointer. | 567 // and this function returns an invalid page pointer. |
| 558 // Otherwise, the function searches a page after 'p' that is | 568 // Otherwise, the function searches a page after 'p' that is |
| 559 // the first page of a chunk. Pages after the found page | 569 // the first page of a chunk. Pages after the found page |
| 560 // are freed and the function returns 'p'. | 570 // are freed and the function returns 'p'. |
| 561 static Page* FreePages(Page* p); | 571 Page* FreePages(Page* p); |
| 562 | 572 |
| 563 // Frees all pages owned by given space. | 573 // Frees all pages owned by given space. |
| 564 static void FreeAllPages(PagedSpace* space); | 574 void FreeAllPages(PagedSpace* space); |
| 565 | 575 |
| 566 // Allocates and frees raw memory of a certain size. | 576 // Allocates and frees raw memory of a certain size. |
| 567 // These are just thin wrappers around OS::Allocate and OS::Free, | 577 // These are just thin wrappers around OS::Allocate and OS::Free, |
| 568 // but keep track of allocated bytes as part of heap. | 578 // but keep track of allocated bytes as part of heap. |
| 569 // If the flag is EXECUTABLE and a code range exists, the requested | 579 // If the flag is EXECUTABLE and a code range exists, the requested |
| 570 // memory is allocated from the code range. If a code range exists | 580 // memory is allocated from the code range. If a code range exists |
| 571 // and the freed memory is in it, the code range manages the freed memory. | 581 // and the freed memory is in it, the code range manages the freed memory. |
| 572 MUST_USE_RESULT static void* AllocateRawMemory(const size_t requested, | 582 MUST_USE_RESULT void* AllocateRawMemory(const size_t requested, |
| 573 size_t* allocated, | 583 size_t* allocated, |
| 574 Executability executable); | 584 Executability executable); |
| 575 static void FreeRawMemory(void* buf, | 585 void FreeRawMemory(void* buf, |
| 576 size_t length, | 586 size_t length, |
| 577 Executability executable); | 587 Executability executable); |
| 578 static void PerformAllocationCallback(ObjectSpace space, | 588 void PerformAllocationCallback(ObjectSpace space, |
| 579 AllocationAction action, | 589 AllocationAction action, |
| 580 size_t size); | 590 size_t size); |
| 581 | 591 |
| 582 static void AddMemoryAllocationCallback(MemoryAllocationCallback callback, | 592 void AddMemoryAllocationCallback(MemoryAllocationCallback callback, |
| 583 ObjectSpace space, | 593 ObjectSpace space, |
| 584 AllocationAction action); | 594 AllocationAction action); |
| 585 static void RemoveMemoryAllocationCallback( | 595 void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback); |
| 586 MemoryAllocationCallback callback); | 596 bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback); |
| 587 static bool MemoryAllocationCallbackRegistered( | |
| 588 MemoryAllocationCallback callback); | |
| 589 | 597 |
| 590 // Returns the maximum available bytes of heaps. | 598 // Returns the maximum available bytes of heaps. |
| 591 static intptr_t Available() { | 599 intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; } |
| 592 return capacity_ < size_ ? 0 : capacity_ - size_; | |
| 593 } | |
| 594 | 600 |
| 595 // Returns allocated spaces in bytes. | 601 // Returns allocated spaces in bytes. |
| 596 static intptr_t Size() { return size_; } | 602 intptr_t Size() { return size_; } |
| 597 | 603 |
| 598 // Returns the maximum available executable bytes of heaps. | 604 // Returns the maximum available executable bytes of heaps. |
| 599 static intptr_t AvailableExecutable() { | 605 intptr_t AvailableExecutable() { |
| 600 if (capacity_executable_ < size_executable_) return 0; | 606 if (capacity_executable_ < size_executable_) return 0; |
| 601 return capacity_executable_ - size_executable_; | 607 return capacity_executable_ - size_executable_; |
| 602 } | 608 } |
| 603 | 609 |
| 604 // Returns allocated executable spaces in bytes. | 610 // Returns allocated executable spaces in bytes. |
| 605 static intptr_t SizeExecutable() { return size_executable_; } | 611 intptr_t SizeExecutable() { return size_executable_; } |
| 606 | 612 |
| 607 // Returns maximum available bytes that the old space can have. | 613 // Returns maximum available bytes that the old space can have. |
| 608 static intptr_t MaxAvailable() { | 614 intptr_t MaxAvailable() { |
| 609 return (Available() / Page::kPageSize) * Page::kObjectAreaSize; | 615 return (Available() / Page::kPageSize) * Page::kObjectAreaSize; |
| 610 } | 616 } |
| 611 | 617 |
| 612 // Sanity check on a pointer. | |
| 613 static bool SafeIsInAPageChunk(Address addr); | |
| 614 | |
| 615 // Links two pages. | 618 // Links two pages. |
| 616 static inline void SetNextPage(Page* prev, Page* next); | 619 inline void SetNextPage(Page* prev, Page* next); |
| 617 | 620 |
| 618 // Returns the next page of a given page. | 621 // Returns the next page of a given page. |
| 619 static inline Page* GetNextPage(Page* p); | 622 inline Page* GetNextPage(Page* p); |
| 620 | 623 |
| 621 // Checks whether a page belongs to a space. | 624 // Checks whether a page belongs to a space. |
| 622 static inline bool IsPageInSpace(Page* p, PagedSpace* space); | 625 inline bool IsPageInSpace(Page* p, PagedSpace* space); |
| 623 | 626 |
| 624 // Returns the space that owns the given page. | 627 // Returns the space that owns the given page. |
| 625 static inline PagedSpace* PageOwner(Page* page); | 628 inline PagedSpace* PageOwner(Page* page); |
| 626 | 629 |
| 627 // Finds the first/last page in the same chunk as a given page. | 630 // Finds the first/last page in the same chunk as a given page. |
| 628 static Page* FindFirstPageInSameChunk(Page* p); | 631 Page* FindFirstPageInSameChunk(Page* p); |
| 629 static Page* FindLastPageInSameChunk(Page* p); | 632 Page* FindLastPageInSameChunk(Page* p); |
| 630 | 633 |
| 631 // Relinks list of pages owned by space to make it chunk-ordered. | 634 // Relinks list of pages owned by space to make it chunk-ordered. |
| 632 // Returns new first and last pages of space. | 635 // Returns new first and last pages of space. |
| 633 // Also returns last page in relinked list which has WasInUsedBeforeMC | 636 // Also returns last page in relinked list which has WasInUsedBeforeMC |
| 634 // flag set. | 637 // flag set. |
| 635 static void RelinkPageListInChunkOrder(PagedSpace* space, | 638 void RelinkPageListInChunkOrder(PagedSpace* space, |
| 636 Page** first_page, | 639 Page** first_page, |
| 637 Page** last_page, | 640 Page** last_page, |
| 638 Page** last_page_in_use); | 641 Page** last_page_in_use); |
| 639 | 642 |
| 640 #ifdef ENABLE_HEAP_PROTECTION | 643 #ifdef ENABLE_HEAP_PROTECTION |
| 641 // Protect/unprotect a block of memory by marking it read-only/writable. | 644 // Protect/unprotect a block of memory by marking it read-only/writable. |
| 642 static inline void Protect(Address start, size_t size); | 645 inline void Protect(Address start, size_t size); |
| 643 static inline void Unprotect(Address start, size_t size, | 646 inline void Unprotect(Address start, size_t size, |
| 644 Executability executable); | 647 Executability executable); |
| 645 | 648 |
| 646 // Protect/unprotect a chunk given a page in the chunk. | 649 // Protect/unprotect a chunk given a page in the chunk. |
| 647 static inline void ProtectChunkFromPage(Page* page); | 650 inline void ProtectChunkFromPage(Page* page); |
| 648 static inline void UnprotectChunkFromPage(Page* page); | 651 inline void UnprotectChunkFromPage(Page* page); |
| 649 #endif | 652 #endif |
| 650 | 653 |
| 651 #ifdef DEBUG | 654 #ifdef DEBUG |
| 652 // Reports statistic info of the space. | 655 // Reports statistic info of the space. |
| 653 static void ReportStatistics(); | 656 void ReportStatistics(); |
| 654 #endif | 657 #endif |
| 655 | 658 |
| 656 static void AddToAllocatedChunks(Address addr, intptr_t size); | |
| 657 static void RemoveFromAllocatedChunks(Address addr, intptr_t size); | |
| 658 // Note: This only checks the regular chunks, not the odd-sized initial | |
| 659 // chunk. | |
| 660 static bool InAllocatedChunks(Address addr); | |
| 661 | |
| 662 // Due to encoding limitation, we can only have 8K chunks. | 659 // Due to encoding limitation, we can only have 8K chunks. |
| 663 static const int kMaxNofChunks = 1 << kPageSizeBits; | 660 static const int kMaxNofChunks = 1 << kPageSizeBits; |
| 664 // If a chunk has at least 16 pages, the maximum heap size is about | 661 // If a chunk has at least 16 pages, the maximum heap size is about |
| 665 // 8K * 8K * 16 = 1G bytes. | 662 // 8K * 8K * 16 = 1G bytes. |
| 666 #ifdef V8_TARGET_ARCH_X64 | 663 #ifdef V8_TARGET_ARCH_X64 |
| 667 static const int kPagesPerChunk = 32; | 664 static const int kPagesPerChunk = 32; |
| 668 // On 64 bit the chunk table consists of 4 levels of 4096-entry tables. | 665 // On 64 bit the chunk table consists of 4 levels of 4096-entry tables. |
| 669 static const int kPagesPerChunkLog2 = 5; | 666 static const int kPagesPerChunkLog2 = 5; |
| 670 static const int kChunkTableLevels = 4; | 667 static const int kChunkTableLevels = 4; |
| 671 static const int kChunkTableBitsPerLevel = 12; | 668 static const int kChunkTableBitsPerLevel = 12; |
| 672 #else | 669 #else |
| 673 static const int kPagesPerChunk = 16; | 670 static const int kPagesPerChunk = 16; |
| 674 // On 32 bit the chunk table consists of 2 levels of 256-entry tables. | 671 // On 32 bit the chunk table consists of 2 levels of 256-entry tables. |
| 675 static const int kPagesPerChunkLog2 = 4; | 672 static const int kPagesPerChunkLog2 = 4; |
| 676 static const int kChunkTableLevels = 2; | 673 static const int kChunkTableLevels = 2; |
| 677 static const int kChunkTableBitsPerLevel = 8; | 674 static const int kChunkTableBitsPerLevel = 8; |
| 678 #endif | 675 #endif |
| 679 | 676 |
| 680 private: | 677 private: |
| 678 MemoryAllocator(); |
| 679 |
| 681 static const int kChunkSize = kPagesPerChunk * Page::kPageSize; | 680 static const int kChunkSize = kPagesPerChunk * Page::kPageSize; |
| 682 static const int kChunkSizeLog2 = kPagesPerChunkLog2 + kPageSizeBits; | 681 static const int kChunkSizeLog2 = kPagesPerChunkLog2 + kPageSizeBits; |
| 683 static const int kChunkTableTopLevelEntries = | |
| 684 1 << (sizeof(intptr_t) * kBitsPerByte - kChunkSizeLog2 - | |
| 685 (kChunkTableLevels - 1) * kChunkTableBitsPerLevel); | |
| 686 | |
| 687 // The chunks are not chunk-size aligned so for a given chunk-sized area of | |
| 688 // memory there can be two chunks that cover it. | |
| 689 static const int kChunkTableFineGrainedWordsPerEntry = 2; | |
| 690 static const uintptr_t kUnusedChunkTableEntry = 0; | |
| 691 | 682 |
| 692 // Maximum space size in bytes. | 683 // Maximum space size in bytes. |
| 693 static intptr_t capacity_; | 684 intptr_t capacity_; |
| 694 // Maximum subset of capacity_ that can be executable | 685 // Maximum subset of capacity_ that can be executable |
| 695 static intptr_t capacity_executable_; | 686 intptr_t capacity_executable_; |
| 696 | |
| 697 // Top level table to track whether memory is part of a chunk or not. | |
| 698 static uintptr_t chunk_table_[kChunkTableTopLevelEntries]; | |
| 699 | 687 |
| 700 // Allocated space size in bytes. | 688 // Allocated space size in bytes. |
| 701 static intptr_t size_; | 689 intptr_t size_; |
| 690 |
| 702 // Allocated executable space size in bytes. | 691 // Allocated executable space size in bytes. |
| 703 static intptr_t size_executable_; | 692 intptr_t size_executable_; |
| 704 | 693 |
| 705 struct MemoryAllocationCallbackRegistration { | 694 struct MemoryAllocationCallbackRegistration { |
| 706 MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback, | 695 MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback, |
| 707 ObjectSpace space, | 696 ObjectSpace space, |
| 708 AllocationAction action) | 697 AllocationAction action) |
| 709 : callback(callback), space(space), action(action) { | 698 : callback(callback), space(space), action(action) { |
| 710 } | 699 } |
| 711 MemoryAllocationCallback callback; | 700 MemoryAllocationCallback callback; |
| 712 ObjectSpace space; | 701 ObjectSpace space; |
| 713 AllocationAction action; | 702 AllocationAction action; |
| 714 }; | 703 }; |
| 715 // A list of callbacks that are triggered when memory is allocated or freed. | 704 // A list of callbacks that are triggered when memory is allocated or freed. |
| 716 static List<MemoryAllocationCallbackRegistration> | 705 List<MemoryAllocationCallbackRegistration> |
| 717 memory_allocation_callbacks_; | 706 memory_allocation_callbacks_; |
| 718 | 707 |
| 719 // The initial chunk of virtual memory. | 708 // The initial chunk of virtual memory. |
| 720 static VirtualMemory* initial_chunk_; | 709 VirtualMemory* initial_chunk_; |
| 721 | 710 |
| 722 // Allocated chunk info: chunk start address, chunk size, and owning space. | 711 // Allocated chunk info: chunk start address, chunk size, and owning space. |
| 723 class ChunkInfo BASE_EMBEDDED { | 712 class ChunkInfo BASE_EMBEDDED { |
| 724 public: | 713 public: |
| 725 ChunkInfo() : address_(NULL), | 714 ChunkInfo() : address_(NULL), |
| 726 size_(0), | 715 size_(0), |
| 727 owner_(NULL), | 716 owner_(NULL), |
| 728 executable_(NOT_EXECUTABLE) {} | 717 executable_(NOT_EXECUTABLE), |
| 718 owner_identity_(FIRST_SPACE) {} |
| 729 inline void init(Address a, size_t s, PagedSpace* o); | 719 inline void init(Address a, size_t s, PagedSpace* o); |
| 730 Address address() { return address_; } | 720 Address address() { return address_; } |
| 731 size_t size() { return size_; } | 721 size_t size() { return size_; } |
| 732 PagedSpace* owner() { return owner_; } | 722 PagedSpace* owner() { return owner_; } |
| 733 // We save executability of the owner to allow using it | 723 // We save executability of the owner to allow using it |
| 734 // when collecting stats after the owner has been destroyed. | 724 // when collecting stats after the owner has been destroyed. |
| 735 Executability executable() const { return executable_; } | 725 Executability executable() const { return executable_; } |
| 726 AllocationSpace owner_identity() const { return owner_identity_; } |
| 736 | 727 |
| 737 private: | 728 private: |
| 738 Address address_; | 729 Address address_; |
| 739 size_t size_; | 730 size_t size_; |
| 740 PagedSpace* owner_; | 731 PagedSpace* owner_; |
| 741 Executability executable_; | 732 Executability executable_; |
| 733 AllocationSpace owner_identity_; |
| 742 }; | 734 }; |
| 743 | 735 |
| 744 // Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids. | 736 // Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids. |
| 745 static List<ChunkInfo> chunks_; | 737 List<ChunkInfo> chunks_; |
| 746 static List<int> free_chunk_ids_; | 738 List<int> free_chunk_ids_; |
| 747 static int max_nof_chunks_; | 739 int max_nof_chunks_; |
| 748 static int top_; | 740 int top_; |
| 749 | 741 |
| 750 // Push/pop a free chunk id onto/from the stack. | 742 // Push/pop a free chunk id onto/from the stack. |
| 751 static void Push(int free_chunk_id); | 743 void Push(int free_chunk_id); |
| 752 static int Pop(); | 744 int Pop(); |
| 753 static bool OutOfChunkIds() { return top_ == 0; } | 745 bool OutOfChunkIds() { return top_ == 0; } |
| 754 | 746 |
| 755 // Frees a chunk. | 747 // Frees a chunk. |
| 756 static void DeleteChunk(int chunk_id); | 748 void DeleteChunk(int chunk_id); |
| 757 | |
| 758 // Helpers to maintain and query the chunk tables. | |
| 759 static void AddChunkUsingAddress( | |
| 760 uintptr_t chunk_start, // Where the chunk starts. | |
| 761 uintptr_t chunk_index_base); // Used to place the chunk in the tables. | |
| 762 static void RemoveChunkFoundUsingAddress( | |
| 763 uintptr_t chunk_start, // Where the chunk starts. | |
| 764 uintptr_t chunk_index_base); // Used to locate the entry in the tables. | |
| 765 // Controls whether the lookup creates intermediate levels of tables as | |
| 766 // needed. | |
| 767 enum CreateTables { kDontCreateTables, kCreateTablesAsNeeded }; | |
| 768 static uintptr_t* AllocatedChunksFinder(uintptr_t* table, | |
| 769 uintptr_t address, | |
| 770 int bit_position, | |
| 771 CreateTables create_as_needed); | |
| 772 static void FreeChunkTables(uintptr_t* array, int length, int level); | |
| 773 static int FineGrainedIndexForAddress(uintptr_t address) { | |
| 774 int index = ((address >> kChunkSizeLog2) & | |
| 775 ((1 << kChunkTableBitsPerLevel) - 1)); | |
| 776 return index * kChunkTableFineGrainedWordsPerEntry; | |
| 777 } | |
| 778 | |
| 779 | 749 |
| 780 // Basic check whether a chunk id is in the valid range. | 750 // Basic check whether a chunk id is in the valid range. |
| 781 static inline bool IsValidChunkId(int chunk_id); | 751 inline bool IsValidChunkId(int chunk_id); |
| 782 | 752 |
| 783 // Checks whether a chunk id identifies an allocated chunk. | 753 // Checks whether a chunk id identifies an allocated chunk. |
| 784 static inline bool IsValidChunk(int chunk_id); | 754 inline bool IsValidChunk(int chunk_id); |
| 785 | 755 |
| 786 // Returns the chunk id that a page belongs to. | 756 // Returns the chunk id that a page belongs to. |
| 787 static inline int GetChunkId(Page* p); | 757 inline int GetChunkId(Page* p); |
| 788 | 758 |
| 789 // True if the address lies in the initial chunk. | 759 // True if the address lies in the initial chunk. |
| 790 static inline bool InInitialChunk(Address address); | 760 inline bool InInitialChunk(Address address); |
| 791 | 761 |
| 792 // Initializes pages in a chunk. Returns the first page address. | 762 // Initializes pages in a chunk. Returns the first page address. |
| 793 // This function and GetChunkId() are provided for the mark-compact | 763 // This function and GetChunkId() are provided for the mark-compact |
| 794 // collector to rebuild page headers in the from space, which is | 764 // collector to rebuild page headers in the from space, which is |
| 795 // used as a marking stack and its page headers are destroyed. | 765 // used as a marking stack and its page headers are destroyed. |
| 796 static Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk, | 766 Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk, |
| 797 PagedSpace* owner); | 767 PagedSpace* owner); |
| 798 | 768 |
| 799 static Page* RelinkPagesInChunk(int chunk_id, | 769 Page* RelinkPagesInChunk(int chunk_id, |
| 800 Address chunk_start, | 770 Address chunk_start, |
| 801 size_t chunk_size, | 771 size_t chunk_size, |
| 802 Page* prev, | 772 Page* prev, |
| 803 Page** last_page_in_use); | 773 Page** last_page_in_use); |
| 774 |
| 775 friend class Isolate; |
| 776 |
| 777 Isolate* isolate_; |
| 778 |
| 779 DISALLOW_COPY_AND_ASSIGN(MemoryAllocator); |
| 804 }; | 780 }; |
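
The callback machinery above (MemoryAllocationCallbackRegistration plus the Add/Remove/Perform trio) is a small observer list filtered by space and action, now stored per allocator instance instead of in statics. A self-contained sketch of the dispatch; note the real ObjectSpace/AllocationAction values are OR-able flags, so the exact-match-or-wildcard filter below is a simplification:

```cpp
#include <cstddef>
#include <vector>

// Simplified stand-ins for v8::ObjectSpace / v8::AllocationAction.
enum SpaceTag  { kSpaceAll, kSpaceCode, kSpaceOld };
enum ActionTag { kActionAll, kActionAllocate, kActionFree };
typedef void (*MemCallback)(SpaceTag, ActionTag, size_t);

struct Registration {  // mirrors MemoryAllocationCallbackRegistration
  MemCallback callback;
  SpaceTag space;
  ActionTag action;
};

class AllocatorSketch {
 public:
  void Add(MemCallback cb, SpaceTag s, ActionTag a) {
    Registration r = { cb, s, a };
    callbacks_.push_back(r);
  }
  // Sketch of PerformAllocationCallback: fire every registration whose
  // space/action filters match (treating the *All tags as wildcards).
  void Perform(SpaceTag s, ActionTag a, size_t size) {
    for (size_t i = 0; i < callbacks_.size(); ++i) {
      const Registration& r = callbacks_[i];
      bool space_ok  = (r.space == kSpaceAll)   || (r.space == s);
      bool action_ok = (r.action == kActionAll) || (r.action == a);
      if (space_ok && action_ok) r.callback(s, a, size);
    }
  }
 private:
  std::vector<Registration> callbacks_;  // instance state, as in the new class
};
```
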
| 805 | 781 |
| 806 | 782 |
| 807 // ----------------------------------------------------------------------------- | 783 // ----------------------------------------------------------------------------- |
| 808 // Interface for heap object iterator to be implemented by all object space | 784 // Interface for heap object iterator to be implemented by all object space |
| 809 // object iterators. | 785 // object iterators. |
| 810 // | 786 // |
| 811 // NOTE: The space-specific object iterators also implement their own next() | 787 // NOTE: The space-specific object iterators also implement their own next() |
| 812 // method, which is used to avoid virtual function calls when | 788 // method, which is used to avoid virtual function calls when |
| 813 // iterating over a specific space. | 789 // iterating over a specific space. |
| (...skipping 227 matching lines...) |
| 1041 intptr_t capacity_; | 1017 intptr_t capacity_; |
| 1042 intptr_t available_; | 1018 intptr_t available_; |
| 1043 intptr_t size_; | 1019 intptr_t size_; |
| 1044 intptr_t waste_; | 1020 intptr_t waste_; |
| 1045 }; | 1021 }; |
| 1046 | 1022 |
| 1047 | 1023 |
| 1048 class PagedSpace : public Space { | 1024 class PagedSpace : public Space { |
| 1049 public: | 1025 public: |
| 1050 // Creates a space with a maximum capacity, and an id. | 1026 // Creates a space with a maximum capacity, and an id. |
| 1051 PagedSpace(intptr_t max_capacity, | 1027 PagedSpace(Heap* heap, |
| 1028 intptr_t max_capacity, |
| 1052 AllocationSpace id, | 1029 AllocationSpace id, |
| 1053 Executability executable); | 1030 Executability executable); |
| 1054 | 1031 |
| 1055 virtual ~PagedSpace() {} | 1032 virtual ~PagedSpace() {} |
| 1056 | 1033 |
| 1057 // Set up the space using the given address range of virtual memory (from | 1034 // Set up the space using the given address range of virtual memory (from |
| 1058 // the memory allocator's initial chunk) if possible. If the block of | 1035 // the memory allocator's initial chunk) if possible. If the block of |
| 1059 // addresses is not big enough to contain a single page-aligned page, a | 1036 // addresses is not big enough to contain a single page-aligned page, a |
| 1060 // fresh chunk will be allocated. | 1037 // fresh chunk will be allocated. |
| 1061 bool Setup(Address start, size_t size); | 1038 bool Setup(Address start, size_t size); |
| (...skipping 272 matching lines...) |
| 1334 // ----------------------------------------------------------------------------- | 1311 // ----------------------------------------------------------------------------- |
| 1335 // SemiSpace in young generation | 1312 // SemiSpace in young generation |
| 1336 // | 1313 // |
| 1337 // A semispace is a contiguous chunk of memory. The mark-compact collector | 1314 // A semispace is a contiguous chunk of memory. The mark-compact collector |
| 1338 // uses the memory in the from space as a marking stack when tracing live | 1315 // uses the memory in the from space as a marking stack when tracing live |
| 1339 // objects. | 1316 // objects. |
| 1340 | 1317 |
| 1341 class SemiSpace : public Space { | 1318 class SemiSpace : public Space { |
| 1342 public: | 1319 public: |
| 1343 // Constructor. | 1320 // Constructor. |
| 1344 SemiSpace() :Space(NEW_SPACE, NOT_EXECUTABLE) { | 1321 explicit SemiSpace(Heap* heap) : Space(heap, NEW_SPACE, NOT_EXECUTABLE) { |
| 1345 start_ = NULL; | 1322 start_ = NULL; |
| 1346 age_mark_ = NULL; | 1323 age_mark_ = NULL; |
| 1347 } | 1324 } |
| 1348 | 1325 |
| 1349 // Sets up the semispace using the given chunk. | 1326 // Sets up the semispace using the given chunk. |
| 1350 bool Setup(Address start, int initial_capacity, int maximum_capacity); | 1327 bool Setup(Address start, int initial_capacity, int maximum_capacity); |
| 1351 | 1328 |
| 1352 // Tear down the space. Heap memory was not allocated by the space, so it | 1329 // Tear down the space. Heap memory was not allocated by the space, so it |
| 1353 // is not deallocated here. | 1330 // is not deallocated here. |
| 1354 void TearDown(); | 1331 void TearDown(); |
| (...skipping 146 matching lines...) |
| 1501 | 1478 |
| 1502 // ----------------------------------------------------------------------------- | 1479 // ----------------------------------------------------------------------------- |
| 1503 // The young generation space. | 1480 // The young generation space. |
| 1504 // | 1481 // |
| 1505 // The new space consists of a contiguous pair of semispaces. It simply | 1482 // The new space consists of a contiguous pair of semispaces. It simply |
| 1506 // forwards most functions to the appropriate semispace. | 1483 // forwards most functions to the appropriate semispace. |
| 1507 | 1484 |
| 1508 class NewSpace : public Space { | 1485 class NewSpace : public Space { |
| 1509 public: | 1486 public: |
| 1510 // Constructor. | 1487 // Constructor. |
| 1511 NewSpace() : Space(NEW_SPACE, NOT_EXECUTABLE) {} | 1488 explicit NewSpace(Heap* heap) |
| 1489 : Space(heap, NEW_SPACE, NOT_EXECUTABLE), |
| 1490 to_space_(heap), |
| 1491 from_space_(heap) {} |
| 1512 | 1492 |
| 1513 // Sets up the new space using the given chunk. | 1493 // Sets up the new space using the given chunk. |
| 1514 bool Setup(Address start, int size); | 1494 bool Setup(Address start, int size); |
| 1515 | 1495 |
| 1516 // Tears down the space. Heap memory was not allocated by the space, so it | 1496 // Tears down the space. Heap memory was not allocated by the space, so it |
| 1517 // is not deallocated here. | 1497 // is not deallocated here. |
| 1518 void TearDown(); | 1498 void TearDown(); |
| 1519 | 1499 |
| 1520 // True if the space has been set up but not torn down. | 1500 // True if the space has been set up but not torn down. |
| 1521 bool HasBeenSetup() { | 1501 bool HasBeenSetup() { |
| (...skipping 380 matching lines...) |
| 1902 }; | 1882 }; |
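
The SemiSpace/NewSpace comments above compress a lot: the scavenger evacuates live objects from the from-space into the to-space with Cheney's two-finger algorithm, leaving forwarding addresses behind. A self-contained toy version, assuming one child pointer per object (nothing here is V8's API):

```cpp
#include <cassert>
#include <cstddef>

// Toy Cheney scavenge: fixed-size cells, one pointer field per cell.
struct Cell {
  Cell* forward;  // NULL until copied; then the new location in to-space
  Cell* child;    // the only "pointer field" of this toy object
};

const int kCells = 64;
Cell from_space[kCells], to_space[kCells];

Cell* Copy(Cell* obj, int* alloc) {
  if (obj == NULL) return NULL;
  if (obj->forward != NULL) return obj->forward;  // already evacuated
  Cell* clone = &to_space[(*alloc)++];            // bump-allocate in to-space
  clone->forward = NULL;
  clone->child = obj->child;                      // still points at from-space
  obj->forward = clone;                           // leave a forwarding address
  return clone;
}

// 'alloc' and 'scan' are the two fingers of Cheney's algorithm:
// copied-but-unscanned objects sit between them.
Cell* Scavenge(Cell* root) {
  int alloc = 0, scan = 0;
  root = Copy(root, &alloc);
  while (scan < alloc) {
    Cell* obj = &to_space[scan++];
    obj->child = Copy(obj->child, &alloc);  // rewrite the pointer field
  }
  return root;
}

int main() {
  from_space[0].forward = NULL; from_space[0].child = &from_space[1];
  from_space[1].forward = NULL; from_space[1].child = NULL;
  Cell* new_root = Scavenge(&from_space[0]);
  assert(new_root == &to_space[0]);
  assert(new_root->child == &to_space[1]);
  return 0;
}
```
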
| 1903 | 1883 |
| 1904 | 1884 |
| 1905 // ----------------------------------------------------------------------------- | 1885 // ----------------------------------------------------------------------------- |
| 1906 // Old object space (excluding map objects) | 1886 // Old object space (excluding map objects) |
| 1907 | 1887 |
| 1908 class OldSpace : public PagedSpace { | 1888 class OldSpace : public PagedSpace { |
| 1909 public: | 1889 public: |
| 1910 // Creates an old space object with a given maximum capacity. | 1890 // Creates an old space object with a given maximum capacity. |
| 1911 // The constructor does not allocate pages from OS. | 1891 // The constructor does not allocate pages from OS. |
| 1912 explicit OldSpace(intptr_t max_capacity, | 1892 OldSpace(Heap* heap, |
| 1913 AllocationSpace id, | 1893 intptr_t max_capacity, |
| 1914 Executability executable) | 1894 AllocationSpace id, |
| 1915 : PagedSpace(max_capacity, id, executable), free_list_(id) { | 1895 Executability executable) |
| 1896 : PagedSpace(heap, max_capacity, id, executable), free_list_(id) { |
| 1916 page_extra_ = 0; | 1897 page_extra_ = 0; |
| 1917 } | 1898 } |
| 1918 | 1899 |
| 1919 // The bytes available on the free list (i.e., not above the linear allocation | 1900 // The bytes available on the free list (i.e., not above the linear allocation |
| 1920 // pointer). | 1901 // pointer). |
| 1921 intptr_t AvailableFree() { return free_list_.available(); } | 1902 intptr_t AvailableFree() { return free_list_.available(); } |
| 1922 | 1903 |
| 1923 // The limit of allocation for a page in this space. | 1904 // The limit of allocation for a page in this space. |
| 1924 virtual Address PageAllocationLimit(Page* page) { | 1905 virtual Address PageAllocationLimit(Page* page) { |
| 1925 return page->ObjectAreaEnd(); | 1906 return page->ObjectAreaEnd(); |
| (...skipping 48 matching lines...) |
| 1974 public: | 1955 public: |
| 1975 TRACK_MEMORY("OldSpace") | 1956 TRACK_MEMORY("OldSpace") |
| 1976 }; | 1957 }; |
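
Every concrete space constructor now takes the owning Heap* first, so a heap can be assembled per isolate. A hedged sketch of the setup code this implies (member and argument names, and the EXECUTABLE choice for the code space, are assumptions based on the signatures above, not on Heap's actual Setup):

```cpp
// Hypothetical Heap setup; names are illustrative only.
//
//   old_pointer_space_ = new OldSpace(this, max_old_generation_size_,
//                                     OLD_POINTER_SPACE, NOT_EXECUTABLE);
//   code_space_ = new OldSpace(this, max_old_generation_size_,
//                              CODE_SPACE, EXECUTABLE);
//   map_space_ = new MapSpace(this, max_old_generation_size_,
//                             max_map_space_pages, MAP_SPACE);
```
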
| 1977 | 1958 |
| 1978 | 1959 |
| 1979 // ----------------------------------------------------------------------------- | 1960 // ----------------------------------------------------------------------------- |
| 1980 // Old space for objects of a fixed size | 1961 // Old space for objects of a fixed size |
| 1981 | 1962 |
| 1982 class FixedSpace : public PagedSpace { | 1963 class FixedSpace : public PagedSpace { |
| 1983 public: | 1964 public: |
| 1984 FixedSpace(intptr_t max_capacity, | 1965 FixedSpace(Heap* heap, |
| 1966 intptr_t max_capacity, |
| 1985 AllocationSpace id, | 1967 AllocationSpace id, |
| 1986 int object_size_in_bytes, | 1968 int object_size_in_bytes, |
| 1987 const char* name) | 1969 const char* name) |
| 1988 : PagedSpace(max_capacity, id, NOT_EXECUTABLE), | 1970 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE), |
| 1989 object_size_in_bytes_(object_size_in_bytes), | 1971 object_size_in_bytes_(object_size_in_bytes), |
| 1990 name_(name), | 1972 name_(name), |
| 1991 free_list_(id, object_size_in_bytes) { | 1973 free_list_(id, object_size_in_bytes) { |
| 1992 page_extra_ = Page::kObjectAreaSize % object_size_in_bytes; | 1974 page_extra_ = Page::kObjectAreaSize % object_size_in_bytes; |
| 1993 } | 1975 } |
| 1994 | 1976 |
| 1995 // The limit of allocation for a page in this space. | 1977 // The limit of allocation for a page in this space. |
| 1996 virtual Address PageAllocationLimit(Page* page) { | 1978 virtual Address PageAllocationLimit(Page* page) { |
| 1997 return page->ObjectAreaEnd() - page_extra_; | 1979 return page->ObjectAreaEnd() - page_extra_; |
| 1998 } | 1980 } |
| (...skipping 53 matching lines...) |
| 2052 FixedSizeFreeList free_list_; | 2034 FixedSizeFreeList free_list_; |
| 2053 }; | 2035 }; |
| 2054 | 2036 |
| 2055 | 2037 |
| 2056 // ----------------------------------------------------------------------------- | 2038 // ----------------------------------------------------------------------------- |
| 2057 // Old space for all map objects | 2039 // Old space for all map objects |
| 2058 | 2040 |
| 2059 class MapSpace : public FixedSpace { | 2041 class MapSpace : public FixedSpace { |
| 2060 public: | 2042 public: |
| 2061 // Creates a map space object with a maximum capacity. | 2043 // Creates a map space object with a maximum capacity. |
| 2062 MapSpace(intptr_t max_capacity, int max_map_space_pages, AllocationSpace id) | 2044 MapSpace(Heap* heap, |
| 2063 : FixedSpace(max_capacity, id, Map::kSize, "map"), | 2045 intptr_t max_capacity, |
| 2046 int max_map_space_pages, |
| 2047 AllocationSpace id) |
| 2048 : FixedSpace(heap, max_capacity, id, Map::kSize, "map"), |
| 2064 max_map_space_pages_(max_map_space_pages) { | 2049 max_map_space_pages_(max_map_space_pages) { |
| 2065 ASSERT(max_map_space_pages < kMaxMapPageIndex); | 2050 ASSERT(max_map_space_pages < kMaxMapPageIndex); |
| 2066 } | 2051 } |
| 2067 | 2052 |
| 2068 // Prepares for a mark-compact GC. | 2053 // Prepares for a mark-compact GC. |
| 2069 virtual void PrepareForMarkCompact(bool will_compact); | 2054 virtual void PrepareForMarkCompact(bool will_compact); |
| 2070 | 2055 |
| 2071 // Given an index, returns the page address. | 2056 // Given an index, returns the page address. |
| 2072 Address PageAddress(int page_index) { return page_addresses_[page_index]; } | 2057 Address PageAddress(int page_index) { return page_addresses_[page_index]; } |
| 2073 | 2058 |
| (...skipping 89 matching lines...) |
| 2163 TRACK_MEMORY("MapSpace") | 2148 TRACK_MEMORY("MapSpace") |
| 2164 }; | 2149 }; |
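
The ASSERT in the MapSpace constructor bounds the page count, and PageAddress() exposes an index-to-address table. One plausible reading, sketched below, is that a bounded page count lets a map reference be packed as a small (page index, offset) pair and decoded through the table during compaction. The constants and names here are made up for the example; the real bound is kMaxMapPageIndex:

    #include <cassert>
    #include <cstdint>

    const int kMaxPagesSim = 16;
    const int kPageSizeBitsSim = 13;  // 8 KB pages in this sketch

    uintptr_t page_addresses_sim[kMaxPagesSim];  // index -> page start

    // Pack a reference as (page index, offset inside the page).
    uint32_t EncodeSim(int page_index, uint32_t offset) {
      assert(page_index < kMaxPagesSim);
      assert(offset < (1u << kPageSizeBitsSim));
      return (static_cast<uint32_t>(page_index) << kPageSizeBitsSim) | offset;
    }

    // Recover the address through the table, as PageAddress() would.
    uintptr_t DecodeSim(uint32_t encoded) {
      int page_index = static_cast<int>(encoded >> kPageSizeBitsSim);
      uint32_t offset = encoded & ((1u << kPageSizeBitsSim) - 1);
      return page_addresses_sim[page_index] + offset;
    }
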
| 2165 | 2150 |
| 2166 | 2151 |
| 2167 // ----------------------------------------------------------------------------- | 2152 // ----------------------------------------------------------------------------- |
| 2168 // Old space for all global object property cell objects | 2153 // Old space for all global object property cell objects |
| 2169 | 2154 |
| 2170 class CellSpace : public FixedSpace { | 2155 class CellSpace : public FixedSpace { |
| 2171 public: | 2156 public: |
| 2172 // Creates a property cell space object with a maximum capacity. | 2157 // Creates a property cell space object with a maximum capacity. |
| 2173 CellSpace(intptr_t max_capacity, AllocationSpace id) | 2158 CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id) |
| 2174 : FixedSpace(max_capacity, id, JSGlobalPropertyCell::kSize, "cell") {} | 2159 : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell") |
| 2160 {} |
| 2175 | 2161 |
| 2176 protected: | 2162 protected: |
| 2177 #ifdef DEBUG | 2163 #ifdef DEBUG |
| 2178 virtual void VerifyObject(HeapObject* obj); | 2164 virtual void VerifyObject(HeapObject* obj); |
| 2179 #endif | 2165 #endif |
| 2180 | 2166 |
| 2181 public: | 2167 public: |
| 2182 TRACK_MEMORY("CellSpace") | 2168 TRACK_MEMORY("CellSpace") |
| 2183 }; | 2169 }; |
| 2184 | 2170 |
| (...skipping 54 matching lines...) |
| 2239 // The total size of this chunk. | 2225 // The total size of this chunk. |
| 2240 size_t size_; | 2226 size_t size_; |
| 2241 | 2227 |
| 2242 public: | 2228 public: |
| 2243 TRACK_MEMORY("LargeObjectChunk") | 2229 TRACK_MEMORY("LargeObjectChunk") |
| 2244 }; | 2230 }; |
| 2245 | 2231 |
| 2246 | 2232 |
| 2247 class LargeObjectSpace : public Space { | 2233 class LargeObjectSpace : public Space { |
| 2248 public: | 2234 public: |
| 2249 explicit LargeObjectSpace(AllocationSpace id); | 2235 LargeObjectSpace(Heap* heap, AllocationSpace id); |
| 2250 virtual ~LargeObjectSpace() {} | 2236 virtual ~LargeObjectSpace() {} |
| 2251 | 2237 |
| 2252 // Initializes internal data structures. | 2238 // Initializes internal data structures. |
| 2253 bool Setup(); | 2239 bool Setup(); |
| 2254 | 2240 |
| 2255 // Releases internal resources, frees objects in this space. | 2241 // Releases internal resources, frees objects in this space. |
| 2256 void TearDown(); | 2242 void TearDown(); |
| 2257 | 2243 |
| 2258 // Allocates a (non-FixedArray, non-Code) large object. | 2244 // Allocates a (non-FixedArray, non-Code) large object. |
| 2259 MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes); | 2245 MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes); |
| 2260 // Allocates a large Code object. | 2246 // Allocates a large Code object. |
| 2261 MUST_USE_RESULT MaybeObject* AllocateRawCode(int size_in_bytes); | 2247 MUST_USE_RESULT MaybeObject* AllocateRawCode(int size_in_bytes); |
| 2262 // Allocates a large FixedArray. | 2248 // Allocates a large FixedArray. |
| 2263 MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int size_in_bytes); | 2249 MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int size_in_bytes); |
| 2264 | 2250 |
| 2265 // Available bytes for objects in this space. | 2251 // Available bytes for objects in this space. |
| 2266 intptr_t Available() { | 2252 inline intptr_t Available(); |
| 2267 return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available()); | |
| 2268 } | |
| 2269 | 2253 |
| 2270 virtual intptr_t Size() { | 2254 virtual intptr_t Size() { |
| 2271 return size_; | 2255 return size_; |
| 2272 } | 2256 } |
| 2273 | 2257 |
| 2274 virtual intptr_t SizeOfObjects() { | 2258 virtual intptr_t SizeOfObjects() { |
| 2275 return objects_size_; | 2259 return objects_size_; |
| 2276 } | 2260 } |
| 2277 | 2261 |
| 2278 int PageCount() { | 2262 int PageCount() { |
| (...skipping 71 matching lines...) |
| 2350 | 2334 |
| 2351 // Implementation of ObjectIterator. | 2335 // Implementation of ObjectIterator. |
| 2352 virtual HeapObject* next_object() { return next(); } | 2336 virtual HeapObject* next_object() { return next(); } |
| 2353 | 2337 |
| 2354 private: | 2338 private: |
| 2355 LargeObjectChunk* current_; | 2339 LargeObjectChunk* current_; |
| 2356 HeapObjectCallback size_func_; | 2340 HeapObjectCallback size_func_; |
| 2357 }; | 2341 }; |
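
A notable change in the LargeObjectSpace hunk above: Available() shrinks from an in-header body calling the static MemoryAllocator::Available() to a bare `inline` declaration. Presumably the body now has to reach the per-isolate memory allocator through the owning heap, which the header avoids spelling out; a hedged reconstruction of the moved definition (a guess at the patch's other half, likely in spaces-inl.h, not code quoted from it) might read:

    intptr_t LargeObjectSpace::Available() {
      // Static call replaced by a lookup through the owning heap's isolate.
      return LargeObjectChunk::ObjectSizeFor(
          heap()->isolate()->memory_allocator()->Available());
    }
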
| 2358 | 2342 |
| 2359 | 2343 |
| 2344 #ifdef DEBUG |
| 2345 struct CommentStatistic { |
| 2346 const char* comment; |
| 2347 int size; |
| 2348 int count; |
| 2349 void Clear() { |
| 2350 comment = NULL; |
| 2351 size = 0; |
| 2352 count = 0; |
| 2353 } |
| 2354 // Must be small, since an iteration is used for lookup. |
| 2355 static const int kMaxComments = 64; |
| 2356 }; |
| 2357 #endif |
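
The CommentStatistic struct added above caps the table at kMaxComments because, as its comment notes, lookup is a linear scan. A self-contained illustration of that pattern follows; CommentStatisticSim and EnterCommentSim are stand-in names, not V8 functions:

    #include <cstring>

    struct CommentStatisticSim {
      const char* comment;
      int size;
      int count;
    };

    const int kMaxCommentsSim = 64;
    CommentStatisticSim table[kMaxCommentsSim];  // zero-init: comment == NULL

    void EnterCommentSim(const char* comment, int bytes) {
      // First pass: linear scan for an existing entry. Acceptable only
      // because the table stays tiny.
      for (int i = 0; i < kMaxCommentsSim; i++) {
        if (table[i].comment != NULL &&
            std::strcmp(table[i].comment, comment) == 0) {
          table[i].size += bytes;
          table[i].count++;
          return;
        }
      }
      // Second pass: claim the first free slot; drop the sample if full.
      for (int i = 0; i < kMaxCommentsSim; i++) {
        if (table[i].comment == NULL) {
          table[i].comment = comment;
          table[i].size = bytes;
          table[i].count = 1;
          return;
        }
      }
    }
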
| 2358 |
| 2359 |
| 2360 } } // namespace v8::internal | 2360 } } // namespace v8::internal |
| 2361 | 2361 |
| 2362 #endif // V8_SPACES_H_ | 2362 #endif // V8_SPACES_H_ |