OLD | NEW |
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 179 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
190 // only after PagedSpace::PrepareForMarkCompact was called. | 190 // only after PagedSpace::PrepareForMarkCompact was called. |
191 inline bool WasInUseBeforeMC(); | 191 inline bool WasInUseBeforeMC(); |
192 | 192 |
193 inline void SetWasInUseBeforeMC(bool was_in_use); | 193 inline void SetWasInUseBeforeMC(bool was_in_use); |
194 | 194 |
195 // True if this page is a large object page. | 195 // True if this page is a large object page. |
196 inline bool IsLargeObjectPage(); | 196 inline bool IsLargeObjectPage(); |
197 | 197 |
198 inline void SetIsLargeObjectPage(bool is_large_object_page); | 198 inline void SetIsLargeObjectPage(bool is_large_object_page); |
199 | 199 |
| 200 inline bool IsPageExecutable(); |
| 201 |
| 202 inline void SetIsPageExecutable(bool is_page_executable); |
| 203 |
200 // Returns the offset of a given address to this page. | 204 // Returns the offset of a given address to this page. |
201 INLINE(int Offset(Address a)) { | 205 INLINE(int Offset(Address a)) { |
202 int offset = static_cast<int>(a - address()); | 206 int offset = static_cast<int>(a - address()); |
203 ASSERT_PAGE_OFFSET(offset); | 207 ASSERT_PAGE_OFFSET(offset); |
204 return offset; | 208 return offset; |
205 } | 209 } |
206 | 210 |
207 // Returns the address for a given offset into this page. | 211 // Returns the address for a given offset into this page. |
208 Address OffsetToAddress(int offset) { | 212 Address OffsetToAddress(int offset) { |
209 ASSERT_PAGE_OFFSET(offset); | 213 ASSERT_PAGE_OFFSET(offset); |
(...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
249 static const int kMaxHeapObjectSize = kObjectAreaSize; | 253 static const int kMaxHeapObjectSize = kObjectAreaSize; |
250 | 254 |
251 static const int kDirtyFlagOffset = 2 * kPointerSize; | 255 static const int kDirtyFlagOffset = 2 * kPointerSize; |
252 static const int kRegionSizeLog2 = 8; | 256 static const int kRegionSizeLog2 = 8; |
253 static const int kRegionSize = 1 << kRegionSizeLog2; | 257 static const int kRegionSize = 1 << kRegionSizeLog2; |
254 static const intptr_t kRegionAlignmentMask = (kRegionSize - 1); | 258 static const intptr_t kRegionAlignmentMask = (kRegionSize - 1); |
255 | 259 |
256 STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt); | 260 STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt); |
257 | 261 |
258 enum PageFlag { | 262 enum PageFlag { |
259 IS_NORMAL_PAGE = 1 << 0, | 263 IS_NORMAL_PAGE = 0, |
260 WAS_IN_USE_BEFORE_MC = 1 << 1, | 264 WAS_IN_USE_BEFORE_MC, |
261 | 265 |
262 // Page allocation watermark was bumped by preallocation during scavenge. | 266 // Page allocation watermark was bumped by preallocation during scavenge. |
263 // Correct watermark can be retrieved by the CachedAllocationWatermark() | 267 // Correct watermark can be retrieved by the CachedAllocationWatermark() |
264 WATERMARK_INVALIDATED = 1 << 2 | 268 WATERMARK_INVALIDATED, |
| 269 IS_EXECUTABLE, |
| 270 NUM_PAGE_FLAGS // Must be last |
265 }; | 271 }; |
| 272 static const int kPageFlagMask = (1 << NUM_PAGE_FLAGS) - 1; |
266 | 273 |
267 // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during | 274 // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during |
268 // scavenge we just invalidate the watermark on each old space page after | 275 // scavenge we just invalidate the watermark on each old space page after |
269 // processing it. And then we flip the meaning of the WATERMARK_INVALIDATED | 276 // processing it. And then we flip the meaning of the WATERMARK_INVALIDATED |
270 // flag at the beginning of the next scavenge and each page becomes marked as | 277 // flag at the beginning of the next scavenge and each page becomes marked as |
271 // having a valid watermark. | 278 // having a valid watermark. |
272 // | 279 // |
273 // The following invariant must hold for pages in old pointer and map spaces: | 280 // The following invariant must hold for pages in old pointer and map spaces: |
274 // If page is in use then page is marked as having invalid watermark at | 281 // If page is in use then page is marked as having invalid watermark at |
275 // the beginning and at the end of any GC. | 282 // the beginning and at the end of any GC. |
276 // | 283 // |
277 // This invariant guarantees that after flipping flag meaning at the | 284 // This invariant guarantees that after flipping flag meaning at the |
278 // beginning of scavenge all pages in use will be marked as having valid | 285 // beginning of scavenge all pages in use will be marked as having valid |
279 // watermark. | 286 // watermark. |
280 static inline void FlipMeaningOfInvalidatedWatermarkFlag(); | 287 static inline void FlipMeaningOfInvalidatedWatermarkFlag(); |
281 | 288 |
282 // Returns true if the page allocation watermark was not altered during | 289 // Returns true if the page allocation watermark was not altered during |
283 // scavenge. | 290 // scavenge. |
284 inline bool IsWatermarkValid(); | 291 inline bool IsWatermarkValid(); |
285 | 292 |
286 inline void InvalidateWatermark(bool value); | 293 inline void InvalidateWatermark(bool value); |
287 | 294 |
288 inline bool GetPageFlag(PageFlag flag); | 295 inline bool GetPageFlag(PageFlag flag); |
289 inline void SetPageFlag(PageFlag flag, bool value); | 296 inline void SetPageFlag(PageFlag flag, bool value); |
290 inline void ClearPageFlags(); | 297 inline void ClearPageFlags(); |
291 | 298 |
292 inline void ClearGCFields(); | 299 inline void ClearGCFields(); |
293 | 300 |
294 static const int kAllocationWatermarkOffsetShift = 3; | 301 static const int kAllocationWatermarkOffsetShift = WATERMARK_INVALIDATED + 1; |
295 static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1; | 302 static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1; |
296 static const uint32_t kAllocationWatermarkOffsetMask = | 303 static const uint32_t kAllocationWatermarkOffsetMask = |
297 ((1 << kAllocationWatermarkOffsetBits) - 1) << | 304 ((1 << kAllocationWatermarkOffsetBits) - 1) << |
298 kAllocationWatermarkOffsetShift; | 305 kAllocationWatermarkOffsetShift; |
299 | 306 |
300 static const uint32_t kFlagsMask = | 307 static const uint32_t kFlagsMask = |
301 ((1 << kAllocationWatermarkOffsetShift) - 1); | 308 ((1 << kAllocationWatermarkOffsetShift) - 1); |
302 | 309 |
303 STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >= | 310 STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >= |
304 kAllocationWatermarkOffsetBits); | 311 kAllocationWatermarkOffsetBits); |
(...skipping 245 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
550 | 557 |
551 // Allocates and frees raw memory of certain size. | 558 // Allocates and frees raw memory of certain size. |
552 // These are just thin wrappers around OS::Allocate and OS::Free, | 559 // These are just thin wrappers around OS::Allocate and OS::Free, |
553 // but keep track of allocated bytes as part of heap. | 560 // but keep track of allocated bytes as part of heap. |
554 // If the flag is EXECUTABLE and a code range exists, the requested | 561 // If the flag is EXECUTABLE and a code range exists, the requested |
555 // memory is allocated from the code range. If a code range exists | 562 // memory is allocated from the code range. If a code range exists |
556 // and the freed memory is in it, the code range manages the freed memory. | 563 // and the freed memory is in it, the code range manages the freed memory. |
557 static void* AllocateRawMemory(const size_t requested, | 564 static void* AllocateRawMemory(const size_t requested, |
558 size_t* allocated, | 565 size_t* allocated, |
559 Executability executable); | 566 Executability executable); |
560 static void FreeRawMemory(void* buf, size_t length); | 567 static void FreeRawMemory(void* buf, |
| 568 size_t length, |
| 569 Executability executable); |
561 | 570 |
562 // Returns the maximum available bytes of heaps. | 571 // Returns the maximum available bytes of heaps. |
563 static int Available() { return capacity_ < size_ ? 0 : capacity_ - size_; } | 572 static int Available() { return capacity_ < size_ ? 0 : capacity_ - size_; } |
564 | 573 |
565 // Returns allocated spaces in bytes. | 574 // Returns allocated spaces in bytes. |
566 static int Size() { return size_; } | 575 static int Size() { return size_; } |
567 | 576 |
| 577 // Returns allocated executable spaces in bytes. |
| 578 static int SizeExecutable() { return size_executable_; } |
| 579 |
568 // Returns maximum available bytes that the old space can have. | 580 // Returns maximum available bytes that the old space can have. |
569 static int MaxAvailable() { | 581 static int MaxAvailable() { |
570 return (Available() / Page::kPageSize) * Page::kObjectAreaSize; | 582 return (Available() / Page::kPageSize) * Page::kObjectAreaSize; |
571 } | 583 } |
572 | 584 |
573 // Links two pages. | 585 // Links two pages. |
574 static inline void SetNextPage(Page* prev, Page* next); | 586 static inline void SetNextPage(Page* prev, Page* next); |
575 | 587 |
576 // Returns the next page of a given page. | 588 // Returns the next page of a given page. |
577 static inline Page* GetNextPage(Page* p); | 589 static inline Page* GetNextPage(Page* p); |
(...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
621 static const int kPagesPerChunk = 16; | 633 static const int kPagesPerChunk = 16; |
622 #endif | 634 #endif |
623 static const int kChunkSize = kPagesPerChunk * Page::kPageSize; | 635 static const int kChunkSize = kPagesPerChunk * Page::kPageSize; |
624 | 636 |
625 private: | 637 private: |
626 // Maximum space size in bytes. | 638 // Maximum space size in bytes. |
627 static int capacity_; | 639 static int capacity_; |
628 | 640 |
629 // Allocated space size in bytes. | 641 // Allocated space size in bytes. |
630 static int size_; | 642 static int size_; |
| 643 // Allocated executable space size in bytes. |
| 644 static int size_executable_; |
631 | 645 |
632 // The initial chunk of virtual memory. | 646 // The initial chunk of virtual memory. |
633 static VirtualMemory* initial_chunk_; | 647 static VirtualMemory* initial_chunk_; |
634 | 648 |
635 // Allocated chunk info: chunk start address, chunk size, and owning space. | 649 // Allocated chunk info: chunk start address, chunk size, and owning space. |
636 class ChunkInfo BASE_EMBEDDED { | 650 class ChunkInfo BASE_EMBEDDED { |
637 public: | 651 public: |
638 ChunkInfo() : address_(NULL), size_(0), owner_(NULL) {} | 652 ChunkInfo() : address_(NULL), size_(0), owner_(NULL) {} |
639 void init(Address a, size_t s, PagedSpace* o) { | 653 void init(Address a, size_t s, PagedSpace* o) { |
640 address_ = a; | 654 address_ = a; |
(...skipping 1410 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2051 return reinterpret_cast<LargeObjectChunk*>(address); | 2065 return reinterpret_cast<LargeObjectChunk*>(address); |
2052 } | 2066 } |
2053 | 2067 |
2054 // Returns the address of this chunk. | 2068 // Returns the address of this chunk. |
2055 Address address() { return reinterpret_cast<Address>(this); } | 2069 Address address() { return reinterpret_cast<Address>(this); } |
2056 | 2070 |
2057 // Accessors for the fields of the chunk. | 2071 // Accessors for the fields of the chunk. |
2058 LargeObjectChunk* next() { return next_; } | 2072 LargeObjectChunk* next() { return next_; } |
2059 void set_next(LargeObjectChunk* chunk) { next_ = chunk; } | 2073 void set_next(LargeObjectChunk* chunk) { next_ = chunk; } |
2060 | 2074 |
2061 size_t size() { return size_; } | 2075 size_t size() { return size_ & ~Page::kPageFlagMask; } |
2062 void set_size(size_t size_in_bytes) { size_ = size_in_bytes; } | 2076 void set_size(size_t size_in_bytes) { size_ = size_in_bytes; } |
2063 | 2077 |
2064 // Returns the object in this chunk. | 2078 // Returns the object in this chunk. |
2065 inline HeapObject* GetObject(); | 2079 inline HeapObject* GetObject(); |
2066 | 2080 |
2067 // Given a requested size returns the physical size of a chunk to be | 2081 // Given a requested size returns the physical size of a chunk to be |
2068 // allocated. | 2082 // allocated. |
2069 static int ChunkSizeFor(int size_in_bytes); | 2083 static int ChunkSizeFor(int size_in_bytes); |
2070 | 2084 |
2071 // Given a chunk size, returns the object size it can accommodate. Used by | 2085 // Given a chunk size, returns the object size it can accommodate. Used by |
(...skipping 116 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2188 | 2202 |
2189 private: | 2203 private: |
2190 LargeObjectChunk* current_; | 2204 LargeObjectChunk* current_; |
2191 HeapObjectCallback size_func_; | 2205 HeapObjectCallback size_func_; |
2192 }; | 2206 }; |
2193 | 2207 |
2194 | 2208 |
2195 } } // namespace v8::internal | 2209 } } // namespace v8::internal |
2196 | 2210 |
2197 #endif // V8_SPACES_H_ | 2211 #endif // V8_SPACES_H_ |
OLD | NEW |