OLD | NEW |
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 297 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
308 virtual void Print() = 0; | 308 virtual void Print() = 0; |
309 #endif | 309 #endif |
310 | 310 |
311 private: | 311 private: |
312 AllocationSpace id_; | 312 AllocationSpace id_; |
313 Executability executable_; | 313 Executability executable_; |
314 }; | 314 }; |
315 | 315 |
316 | 316 |
317 // ---------------------------------------------------------------------------- | 317 // ---------------------------------------------------------------------------- |
| 318 // All heap objects containing executable code (code objects) must be allocated |
| 319 // from a 2 GB range of memory, so that they can call each other using 32-bit |
| 320 // displacements. This happens automatically on 32-bit platforms, where 32-bit |
 | 321 // displacements cover the entire 4 GB virtual address space. On 64-bit |
| 322 // platforms, we support this using the CodeRange object, which reserves and |
| 323 // manages a range of virtual memory. |
| 324 class CodeRange : public AllStatic { |
| 325 public: |
| 326 // Reserves a range of virtual memory, but does not commit any of it. |
| 327 // Can only be called once, at heap initialization time. |
| 328 // Returns false on failure. |
| 329 static bool Setup(const size_t requested_size); |
| 330 |
| 331 // Frees the range of virtual memory, and frees the data structures used to |
| 332 // manage it. |
| 333 static void TearDown(); |
| 334 |
| 335 static bool exists() { return code_range_ != NULL; } |
| 336 static bool contains(Address address) { |
| 337 if (code_range_ == NULL) return false; |
| 338 Address start = static_cast<Address>(code_range_->address()); |
| 339 return start <= address && address < start + code_range_->size(); |
| 340 } |
| 341 |
| 342 // Allocates a chunk of memory from the large-object portion of |
| 343 // the code range. On platforms with no separate code range, should |
| 344 // not be called. |
| 345 static void* AllocateRawMemory(const size_t requested, size_t* allocated); |
| 346 static void FreeRawMemory(void* buf, size_t length); |
| 347 |
| 348 private: |
| 349 // The reserved range of virtual memory that all code objects are put in. |
| 350 static VirtualMemory* code_range_; |
| 351 // Plain old data class, just a struct plus a constructor. |
| 352 class FreeBlock { |
| 353 public: |
| 354 FreeBlock(Address start_arg, size_t size_arg) |
| 355 : start(start_arg), size(size_arg) {} |
| 356 FreeBlock(void* start_arg, size_t size_arg) |
| 357 : start(static_cast<Address>(start_arg)), size(size_arg) {} |
| 358 |
| 359 Address start; |
| 360 size_t size; |
| 361 }; |
| 362 |
| 363 // Freed blocks of memory are added to the free list. When the allocation |
| 364 // list is exhausted, the free list is sorted and merged to make the new |
| 365 // allocation list. |
| 366 static List<FreeBlock> free_list_; |
| 367 // Memory is allocated from the free blocks on the allocation list. |
| 368 // The block at current_allocation_block_index_ is the current block. |
| 369 static List<FreeBlock> allocation_list_; |
| 370 static int current_allocation_block_index_; |
| 371 |
| 372 // Finds a block on the allocation list that contains at least the |
| 373 // requested amount of memory. If none is found, sorts and merges |
| 374 // the existing free memory blocks, and searches again. |
| 375 // If none can be found, terminates V8 with FatalProcessOutOfMemory. |
| 376 static void GetNextAllocationBlock(size_t requested); |
| 377 // Compares the start addresses of two free blocks. |
| 378 static int CompareFreeBlockAddress(const FreeBlock* left, |
| 379 const FreeBlock* right); |
| 380 }; |
| 381 |
| 382 |
| 383 // ---------------------------------------------------------------------------- |
318 // A space acquires chunks of memory from the operating system. The memory | 384 // A space acquires chunks of memory from the operating system. The memory |
319 // allocator manages chunks for the paged heap spaces (old space and map | 385 // allocator manages chunks for the paged heap spaces (old space and map |
320 // space). A paged chunk consists of pages. Pages in a chunk have contiguous | 386 // space). A paged chunk consists of pages. Pages in a chunk have contiguous |
321 // addresses and are linked as a list. | 387 // addresses and are linked as a list. |
322 // | 388 // |
323 // The allocator keeps an initial chunk which is used for the new space. The | 389 // The allocator keeps an initial chunk which is used for the new space. The |
324 // leftover regions of the initial chunk are used for the initial chunks of | 390 // leftover regions of the initial chunk are used for the initial chunks of |
325 // old space and map space if they are big enough to hold at least one page. | 391 // old space and map space if they are big enough to hold at least one page. |
326 // The allocator assumes that there is one old space and one map space, each | 392 // The allocator assumes that there is one old space and one map space, each |
327 // expands the space by allocating kPagesPerChunk pages except the last | 393 // expands the space by allocating kPagesPerChunk pages except the last |
(...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
373 // block is contained in the initial chunk. Returns true if it succeeded | 439 // block is contained in the initial chunk. Returns true if it succeeded |
374 // and false otherwise. | 440 // and false otherwise. |
375 static bool UncommitBlock(Address start, size_t size); | 441 static bool UncommitBlock(Address start, size_t size); |
376 | 442 |
377 // Attempts to allocate the requested (non-zero) number of pages from the | 443 // Attempts to allocate the requested (non-zero) number of pages from the |
378 // OS. Fewer pages might be allocated than requested. If it fails to | 444 // OS. Fewer pages might be allocated than requested. If it fails to |
379 // allocate memory for the OS or cannot allocate a single page, this | 445 // allocate memory for the OS or cannot allocate a single page, this |
380 // function returns an invalid page pointer (NULL). The caller must check | 446 // function returns an invalid page pointer (NULL). The caller must check |
381 // whether the returned page is valid (by calling Page::is_valid()). It is | 447 // whether the returned page is valid (by calling Page::is_valid()). It is |
382 // guaranteed that allocated pages have contiguous addresses. The actual | 448 // guaranteed that allocated pages have contiguous addresses. The actual |
383 // number of allocated page is returned in the output parameter | 449 // number of allocated pages is returned in the output parameter |
384 // allocated_pages. | 450 // allocated_pages. If the PagedSpace owner is executable and there is |
| 451 // a code range, the pages are allocated from the code range. |
385 static Page* AllocatePages(int requested_pages, int* allocated_pages, | 452 static Page* AllocatePages(int requested_pages, int* allocated_pages, |
386 PagedSpace* owner); | 453 PagedSpace* owner); |
387 | 454 |
388 // Frees pages from a given page and after. If 'p' is the first page | 455 // Frees pages from a given page and after. If 'p' is the first page |
389 // of a chunk, pages from 'p' are freed and this function returns an | 456 // of a chunk, pages from 'p' are freed and this function returns an |
390 // invalid page pointer. Otherwise, the function searches a page | 457 // invalid page pointer. Otherwise, the function searches a page |
391 // after 'p' that is the first page of a chunk. Pages after the | 458 // after 'p' that is the first page of a chunk. Pages after the |
392 // found page are freed and the function returns 'p'. | 459 // found page are freed and the function returns 'p'. |
393 static Page* FreePages(Page* p); | 460 static Page* FreePages(Page* p); |
394 | 461 |
395 // Allocates and frees raw memory of certain size. | 462 // Allocates and frees raw memory of certain size. |
396 // These are just thin wrappers around OS::Allocate and OS::Free, | 463 // These are just thin wrappers around OS::Allocate and OS::Free, |
397 // but keep track of allocated bytes as part of heap. | 464 // but keep track of allocated bytes as part of heap. |
| 465 // If the flag is EXECUTABLE and a code range exists, the requested |
| 466 // memory is allocated from the code range. If a code range exists |
| 467 // and the freed memory is in it, the code range manages the freed memory. |
398 static void* AllocateRawMemory(const size_t requested, | 468 static void* AllocateRawMemory(const size_t requested, |
399 size_t* allocated, | 469 size_t* allocated, |
400 Executability executable); | 470 Executability executable); |
401 static void FreeRawMemory(void* buf, size_t length); | 471 static void FreeRawMemory(void* buf, size_t length); |
402 | 472 |
403 // Returns the maximum available bytes of heaps. | 473 // Returns the maximum available bytes of heaps. |
404 static int Available() { return capacity_ < size_ ? 0 : capacity_ - size_; } | 474 static int Available() { return capacity_ < size_ ? 0 : capacity_ - size_; } |
405 | 475 |
406 // Returns allocated spaces in bytes. | 476 // Returns allocated spaces in bytes. |
407 static int Size() { return size_; } | 477 static int Size() { return size_; } |
(...skipping 1455 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1863 | 1933 |
1864 private: | 1934 private: |
1865 LargeObjectChunk* current_; | 1935 LargeObjectChunk* current_; |
1866 HeapObjectCallback size_func_; | 1936 HeapObjectCallback size_func_; |
1867 }; | 1937 }; |
1868 | 1938 |
1869 | 1939 |
1870 } } // namespace v8::internal | 1940 } } // namespace v8::internal |
1871 | 1941 |
1872 #endif // V8_SPACES_H_ | 1942 #endif // V8_SPACES_H_ |
OLD | NEW |