| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 17 matching lines...) |
| 28 #include "v8.h" | 28 #include "v8.h" |
| 29 | 29 |
| 30 #include "liveobjectlist-inl.h" | 30 #include "liveobjectlist-inl.h" |
| 31 #include "macro-assembler.h" | 31 #include "macro-assembler.h" |
| 32 #include "mark-compact.h" | 32 #include "mark-compact.h" |
| 33 #include "platform.h" | 33 #include "platform.h" |
| 34 | 34 |
| 35 namespace v8 { | 35 namespace v8 { |
| 36 namespace internal { | 36 namespace internal { |
| 37 | 37 |
| 38 // For contiguous spaces, top should be in the space (or at the end) and limit | |
| 39 // should be the end of the space. | |
| 40 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \ | |
| 41 ASSERT((space).low() <= (info).top \ | |
| 42 && (info).top <= (space).high() \ | |
| 43 && (info).limit == (space).high()) | |
| 44 | 38 |
| 45 // ---------------------------------------------------------------------------- | 39 // ---------------------------------------------------------------------------- |
| 46 // HeapObjectIterator | 40 // HeapObjectIterator |
| 47 | 41 |
| 48 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) { | 42 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) { |
| 49 Initialize(space->bottom(), space->top(), NULL); | 43 // You can't actually iterate over the anchor page. It is not a real page, |
| 44 // just an anchor for the double linked page list. Initialize as if we have |
| 45 // reached the end of the anchor page, then the first iteration will move on |
| 46 // to the first page. |
| 47 Initialize(space, |
| 48 NULL, |
| 49 NULL, |
| 50 kAllPagesInSpace, |
| 51 NULL); |
| 50 } | 52 } |
| 51 | 53 |
| 52 | 54 |
| 53 HeapObjectIterator::HeapObjectIterator(PagedSpace* space, | 55 HeapObjectIterator::HeapObjectIterator(PagedSpace* space, |
| 54 HeapObjectCallback size_func) { | 56 HeapObjectCallback size_func) { |
| 55 Initialize(space->bottom(), space->top(), size_func); | 57 // You can't actually iterate over the anchor page. It is not a real page, |
| 56 } | 58 // just an anchor for the double linked page list. Initialize the current |
| 57 | 59 // address and end as NULL, then the first iteration will move on |
| 58 | 60 // to the first page. |
| 59 HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) { | 61 Initialize(space, |
| 60 Initialize(start, space->top(), NULL); | 62 NULL, |
| 61 } | 63 NULL, |
| 62 | 64 kAllPagesInSpace, |
| 63 | 65 size_func); |
| 64 HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start, | |
| 65 HeapObjectCallback size_func) { | |
| 66 Initialize(start, space->top(), size_func); | |
| 67 } | 66 } |
| 68 | 67 |
| 69 | 68 |
| 70 HeapObjectIterator::HeapObjectIterator(Page* page, | 69 HeapObjectIterator::HeapObjectIterator(Page* page, |
| 71 HeapObjectCallback size_func) { | 70 HeapObjectCallback size_func) { |
| 72 Initialize(page->ObjectAreaStart(), page->AllocationTop(), size_func); | 71 Space* owner = page->owner(); |
| 72 ASSERT(owner == HEAP->old_pointer_space() || |
| 73 owner == HEAP->old_data_space() || |
| 74 owner == HEAP->map_space() || |
| 75 owner == HEAP->cell_space() || |
| 76 owner == HEAP->code_space()); |
| 77 Initialize(reinterpret_cast<PagedSpace*>(owner), |
| 78 page->ObjectAreaStart(), |
| 79 page->ObjectAreaEnd(), |
| 80 kOnePageOnly, |
| 81 size_func); |
| 82 ASSERT(page->WasSweptPrecisely()); |
| 73 } | 83 } |
| 74 | 84 |
| 75 | 85 |
| 76 void HeapObjectIterator::Initialize(Address cur, Address end, | 86 void HeapObjectIterator::Initialize(PagedSpace* space, |
| 87 Address cur, Address end, |
| 88 HeapObjectIterator::PageMode mode, |
| 77 HeapObjectCallback size_f) { | 89 HeapObjectCallback size_f) { |
| 90 // Check that we actually can iterate this space. |
| 91 ASSERT(!space->was_swept_conservatively()); |
| 92 |
| 93 space_ = space; |
| 78 cur_addr_ = cur; | 94 cur_addr_ = cur; |
| 79 end_addr_ = end; | 95 cur_end_ = end; |
| 80 end_page_ = Page::FromAllocationTop(end); | 96 page_mode_ = mode; |
| 81 size_func_ = size_f; | 97 size_func_ = size_f; |
| 82 Page* p = Page::FromAllocationTop(cur_addr_); | |
| 83 cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop(); | |
| 84 | 98 |
| 85 #ifdef DEBUG | 99 #ifdef DEBUG |
| 86 Verify(); | 100 Verify(); |
| 87 #endif | 101 #endif |
| 88 } | 102 } |
| 89 | 103 |
| 90 | 104 |
| 91 HeapObject* HeapObjectIterator::FromNextPage() { | 105 // We have hit the end of the page and should advance to the next block of |
| 92 if (cur_addr_ == end_addr_) return NULL; | 106 // objects. This happens at the end of the page. |
| 93 | 107 bool HeapObjectIterator::AdvanceToNextPage() { |
| 94 Page* cur_page = Page::FromAllocationTop(cur_addr_); | 108 ASSERT(cur_addr_ == cur_end_); |
| 109 if (page_mode_ == kOnePageOnly) return false; |
| 110 Page* cur_page; |
| 111 if (cur_addr_ == NULL) { |
| 112 cur_page = space_->anchor(); |
| 113 } else { |
| 114 cur_page = Page::FromAddress(cur_addr_ - 1); |
| 115 ASSERT(cur_addr_ == cur_page->ObjectAreaEnd()); |
| 116 } |
| 95 cur_page = cur_page->next_page(); | 117 cur_page = cur_page->next_page(); |
| 96 ASSERT(cur_page->is_valid()); | 118 if (cur_page == space_->anchor()) return false; |
| 97 | |
| 98 cur_addr_ = cur_page->ObjectAreaStart(); | 119 cur_addr_ = cur_page->ObjectAreaStart(); |
| 99 cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop(); | 120 cur_end_ = cur_page->ObjectAreaEnd(); |
| 100 | 121 ASSERT(cur_page->WasSweptPrecisely()); |
| 101 if (cur_addr_ == end_addr_) return NULL; | 122 return true; |
| 102 ASSERT(cur_addr_ < cur_limit_); | |
| 103 #ifdef DEBUG | |
| 104 Verify(); | |
| 105 #endif | |
| 106 return FromCurrentPage(); | |
| 107 } | 123 } |
| 108 | 124 |
| 109 | 125 |
| 110 #ifdef DEBUG | 126 #ifdef DEBUG |
| 111 void HeapObjectIterator::Verify() { | 127 void HeapObjectIterator::Verify() { |
| 112 Page* p = Page::FromAllocationTop(cur_addr_); | 128 // TODO(gc): We should do something here. |
| 113 ASSERT(p == Page::FromAllocationTop(cur_limit_)); | |
| 114 ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_)); | |
| 115 } | 129 } |
| 116 #endif | 130 #endif |
| 117 | 131 |
| 118 | 132 |
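The HeapObjectIterator hunks above replace the old top/limit bookkeeping with iteration over a circular, doubly linked page list hanging off a dummy anchor page: the iterator starts as if it had already exhausted the anchor, and AdvanceToNextPage stops once next_page() wraps back around to it. A minimal stand-alone sketch of that pattern, using simplified stand-in types rather than the real Page/PagedSpace classes:

    // Illustrative only: a dummy anchor node in a circular doubly linked
    // list, with tail insertion (cf. MemoryChunk::InsertAfter in this CL)
    // and a walk that terminates when it returns to the anchor.
    #include <cstdio>

    struct PageNode {
      int id;
      PageNode* prev;
      PageNode* next;
    };

    static void InsertAfter(PageNode* node, PageNode* other) {
      node->next = other->next;
      node->prev = other;
      other->next->prev = node;
      other->next = node;
    }

    int main() {
      PageNode anchor = {-1, &anchor, &anchor};  // the anchor links to itself
      PageNode pages[3];
      for (int i = 0; i < 3; i++) {
        pages[i].id = i;
        InsertAfter(&pages[i], anchor.prev);     // append at the tail
      }
      // The AdvanceToNextPage-style walk: begin at anchor.next and stop
      // as soon as the anchor comes around again.
      for (PageNode* p = anchor.next; p != &anchor; p = p->next) {
        std::printf("visiting page %d\n", p->id);
      }
      return 0;
    }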
| 119 // ----------------------------------------------------------------------------- | 133 // ----------------------------------------------------------------------------- |
| 120 // PageIterator | |
| 121 | |
| 122 PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) { | |
| 123 prev_page_ = NULL; | |
| 124 switch (mode) { | |
| 125 case PAGES_IN_USE: | |
| 126 stop_page_ = space->AllocationTopPage(); | |
| 127 break; | |
| 128 case PAGES_USED_BY_MC: | |
| 129 stop_page_ = space->MCRelocationTopPage(); | |
| 130 break; | |
| 131 case ALL_PAGES: | |
| 132 #ifdef DEBUG | |
| 133 // Verify that the cached last page in the space is actually the | |
| 134 // last page. | |
| 135 for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) { | |
| 136 if (!p->next_page()->is_valid()) { | |
| 137 ASSERT(space->last_page_ == p); | |
| 138 } | |
| 139 } | |
| 140 #endif | |
| 141 stop_page_ = space->last_page_; | |
| 142 break; | |
| 143 } | |
| 144 } | |
| 145 | |
| 146 | |
| 147 // ----------------------------------------------------------------------------- | |
| 148 // CodeRange | 134 // CodeRange |
| 149 | 135 |
| 150 | 136 |
| 151 CodeRange::CodeRange(Isolate* isolate) | 137 CodeRange::CodeRange(Isolate* isolate) |
| 152 : isolate_(isolate), | 138 : isolate_(isolate), |
| 153 code_range_(NULL), | 139 code_range_(NULL), |
| 154 free_list_(0), | 140 free_list_(0), |
| 155 allocation_list_(0), | 141 allocation_list_(0), |
| 156 current_allocation_block_index_(0) { | 142 current_allocation_block_index_(0) { |
| 157 } | 143 } |
| 158 | 144 |
| 159 | 145 |
| 160 bool CodeRange::Setup(const size_t requested) { | 146 bool CodeRange::Setup(const size_t requested) { |
| 161 ASSERT(code_range_ == NULL); | 147 ASSERT(code_range_ == NULL); |
| 162 | 148 |
| 163 code_range_ = new VirtualMemory(requested); | 149 code_range_ = new VirtualMemory(requested); |
| 164 CHECK(code_range_ != NULL); | 150 CHECK(code_range_ != NULL); |
| 165 if (!code_range_->IsReserved()) { | 151 if (!code_range_->IsReserved()) { |
| 166 delete code_range_; | 152 delete code_range_; |
| 167 code_range_ = NULL; | 153 code_range_ = NULL; |
| 168 return false; | 154 return false; |
| 169 } | 155 } |
| 170 | 156 |
| 171 // We are sure that we have mapped a block of requested addresses. | 157 // We are sure that we have mapped a block of requested addresses. |
| 172 ASSERT(code_range_->size() == requested); | 158 ASSERT(code_range_->size() == requested); |
| 173 LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested)); | 159 LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested)); |
| 174 allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size())); | 160 Address base = reinterpret_cast<Address>(code_range_->address()); |
| 161 Address aligned_base = |
| 162 RoundUp(reinterpret_cast<Address>(code_range_->address()), |
| 163 MemoryChunk::kAlignment); |
| 164 size_t size = code_range_->size() - (aligned_base - base); |
| 165 allocation_list_.Add(FreeBlock(aligned_base, size)); |
| 175 current_allocation_block_index_ = 0; | 166 current_allocation_block_index_ = 0; |
| 176 return true; | 167 return true; |
| 177 } | 168 } |
| 178 | 169 |
| 179 | 170 |
| 180 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left, | 171 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left, |
| 181 const FreeBlock* right) { | 172 const FreeBlock* right) { |
| 182 // The entire point of CodeRange is that the difference between two | 173 // The entire point of CodeRange is that the difference between two |
| 183 // addresses in the range can be represented as a signed 32-bit int, | 174 // addresses in the range can be represented as a signed 32-bit int, |
| 184 // so the cast is semantically correct. | 175 // so the cast is semantically correct. |
| (...skipping 36 matching lines...) |
| 221 return; // Found a large enough allocation block. | 212 return; // Found a large enough allocation block. |
| 222 } | 213 } |
| 223 } | 214 } |
| 224 | 215 |
| 225 // Code range is full or too fragmented. | 216 // Code range is full or too fragmented. |
| 226 V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock"); | 217 V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock"); |
| 227 } | 218 } |
| 228 | 219 |
| 229 | 220 |
| 230 | 221 |
| 231 void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) { | 222 Address CodeRange::AllocateRawMemory(const size_t requested, |
| 223 size_t* allocated) { |
| 232 ASSERT(current_allocation_block_index_ < allocation_list_.length()); | 224 ASSERT(current_allocation_block_index_ < allocation_list_.length()); |
| 233 if (requested > allocation_list_[current_allocation_block_index_].size) { | 225 if (requested > allocation_list_[current_allocation_block_index_].size) { |
| 234 // Find an allocation block large enough. This function call may | 226 // Find an allocation block large enough. This function call may |
| 235 // call V8::FatalProcessOutOfMemory if it cannot find a large enough block. | 227 // call V8::FatalProcessOutOfMemory if it cannot find a large enough block. |
| 236 GetNextAllocationBlock(requested); | 228 GetNextAllocationBlock(requested); |
| 237 } | 229 } |
| 238 // Commit the requested memory at the start of the current allocation block. | 230 // Commit the requested memory at the start of the current allocation block. |
| 239 *allocated = RoundUp(requested, Page::kPageSize); | 231 size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment); |
| 240 FreeBlock current = allocation_list_[current_allocation_block_index_]; | 232 FreeBlock current = allocation_list_[current_allocation_block_index_]; |
| 241 if (*allocated >= current.size - Page::kPageSize) { | 233 if (aligned_requested >= (current.size - Page::kPageSize)) { |
| 242 // Don't leave a small free block, useless for a large object or chunk. | 234 // Don't leave a small free block, useless for a large object or chunk. |
| 243 *allocated = current.size; | 235 *allocated = current.size; |
| 236 } else { |
| 237 *allocated = aligned_requested; |
| 244 } | 238 } |
| 245 ASSERT(*allocated <= current.size); | 239 ASSERT(*allocated <= current.size); |
| 240 ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment)); |
| 246 if (!code_range_->Commit(current.start, *allocated, true)) { | 241 if (!code_range_->Commit(current.start, *allocated, true)) { |
| 247 *allocated = 0; | 242 *allocated = 0; |
| 248 return NULL; | 243 return NULL; |
| 249 } | 244 } |
| 250 allocation_list_[current_allocation_block_index_].start += *allocated; | 245 allocation_list_[current_allocation_block_index_].start += *allocated; |
| 251 allocation_list_[current_allocation_block_index_].size -= *allocated; | 246 allocation_list_[current_allocation_block_index_].size -= *allocated; |
| 252 if (*allocated == current.size) { | 247 if (*allocated == current.size) { |
| 253 GetNextAllocationBlock(0); // This block is used up, get the next one. | 248 GetNextAllocationBlock(0); // This block is used up, get the next one. |
| 254 } | 249 } |
| 255 return current.start; | 250 return current.start; |
| 256 } | 251 } |
| 257 | 252 |
| 258 | 253 |
| 259 void CodeRange::FreeRawMemory(void* address, size_t length) { | 254 void CodeRange::FreeRawMemory(Address address, size_t length) { |
| 255 ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment)); |
| 260 free_list_.Add(FreeBlock(address, length)); | 256 free_list_.Add(FreeBlock(address, length)); |
| 261 code_range_->Uncommit(address, length); | 257 code_range_->Uncommit(address, length); |
| 262 } | 258 } |
| 263 | 259 |
| 264 | 260 |
| 265 void CodeRange::TearDown() { | 261 void CodeRange::TearDown() { |
| 266 delete code_range_; // Frees all memory in the virtual memory range. | 262 delete code_range_; // Frees all memory in the virtual memory range. |
| 267 code_range_ = NULL; | 263 code_range_ = NULL; |
| 268 free_list_.Free(); | 264 free_list_.Free(); |
| 269 allocation_list_.Free(); | 265 allocation_list_.Free(); |
| 270 } | 266 } |
| 271 | 267 |
| 272 | 268 |
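The CodeRange hunks above stop handing out page-granular blocks and instead align everything to MemoryChunk::kAlignment: Setup rounds the reserved base up to that alignment and shrinks the first free block by the bytes skipped, and AllocateRawMemory rounds requests up the same way. A small sketch of that arithmetic, with plain integers standing in for Address values and an illustrative alignment constant rather than V8's real one:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t kAlignment = 1u << 20;            // illustrative chunk alignment
      size_t reserved_base = 3 * kAlignment + 4096;  // OS reservations are rarely aligned
      size_t reserved_size = 16 * kAlignment;

      // RoundUp(base, kAlignment), as in CodeRange::Setup.
      size_t aligned_base = (reserved_base + kAlignment - 1) & ~(kAlignment - 1);
      // The bytes skipped to reach the aligned base are not added to the free list.
      size_t usable_size = reserved_size - (aligned_base - reserved_base);

      std::printf("lost to alignment: %zu bytes, usable: %zu bytes\n",
                  aligned_base - reserved_base, usable_size);
      return 0;
    }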
| 273 // ----------------------------------------------------------------------------- | 269 // ----------------------------------------------------------------------------- |
| 274 // MemoryAllocator | 270 // MemoryAllocator |
| 275 // | 271 // |
| 276 | 272 |
| 277 // 270 is an estimate based on the static default heap size of a pair of 256K | |
| 278 // semispaces and a 64M old generation. | |
| 279 const int kEstimatedNumberOfChunks = 270; | |
| 280 | |
| 281 | |
| 282 MemoryAllocator::MemoryAllocator(Isolate* isolate) | 273 MemoryAllocator::MemoryAllocator(Isolate* isolate) |
| 283 : isolate_(isolate), | 274 : isolate_(isolate), |
| 284 capacity_(0), | 275 capacity_(0), |
| 285 capacity_executable_(0), | 276 capacity_executable_(0), |
| 286 size_(0), | 277 size_(0), |
| 287 size_executable_(0), | 278 size_executable_(0) { |
| 288 initial_chunk_(NULL), | |
| 289 chunks_(kEstimatedNumberOfChunks), | |
| 290 free_chunk_ids_(kEstimatedNumberOfChunks), | |
| 291 max_nof_chunks_(0), | |
| 292 top_(0) { | |
| 293 } | |
| 294 | |
| 295 | |
| 296 void MemoryAllocator::Push(int free_chunk_id) { | |
| 297 ASSERT(max_nof_chunks_ > 0); | |
| 298 ASSERT(top_ < max_nof_chunks_); | |
| 299 free_chunk_ids_[top_++] = free_chunk_id; | |
| 300 } | |
| 301 | |
| 302 | |
| 303 int MemoryAllocator::Pop() { | |
| 304 ASSERT(top_ > 0); | |
| 305 return free_chunk_ids_[--top_]; | |
| 306 } | 279 } |
| 307 | 280 |
| 308 | 281 |
| 309 bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) { | 282 bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) { |
| 310 capacity_ = RoundUp(capacity, Page::kPageSize); | 283 capacity_ = RoundUp(capacity, Page::kPageSize); |
| 311 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize); | 284 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize); |
| 312 ASSERT_GE(capacity_, capacity_executable_); | 285 ASSERT_GE(capacity_, capacity_executable_); |
| 313 | 286 |
| 314 // Over-estimate the size of chunks_ array. It assumes the expansion of old | |
| 315 // space is always in the unit of a chunk (kChunkSize) except the last | |
| 316 // expansion. | |
| 317 // | |
| 318 // Due to alignment, allocated space might be one page less than required | |
| 319 // number (kPagesPerChunk) of pages for old spaces. | |
| 320 // | |
| 321 // Reserve two chunk ids for semispaces, one for map space, one for old | |
| 322 // space, and one for code space. | |
| 323 max_nof_chunks_ = | |
| 324 static_cast<int>((capacity_ / (kChunkSize - Page::kPageSize))) + 5; | |
| 325 if (max_nof_chunks_ > kMaxNofChunks) return false; | |
| 326 | |
| 327 size_ = 0; | 287 size_ = 0; |
| 328 size_executable_ = 0; | 288 size_executable_ = 0; |
| 329 ChunkInfo info; // uninitialized element. | 289 |
| 330 for (int i = max_nof_chunks_ - 1; i >= 0; i--) { | |
| 331 chunks_.Add(info); | |
| 332 free_chunk_ids_.Add(i); | |
| 333 } | |
| 334 top_ = max_nof_chunks_; | |
| 335 return true; | 290 return true; |
| 336 } | 291 } |
| 337 | 292 |
| 338 | 293 |
| 339 void MemoryAllocator::TearDown() { | 294 void MemoryAllocator::TearDown() { |
| 340 for (int i = 0; i < max_nof_chunks_; i++) { | 295 // Check that spaces were torn down before MemoryAllocator. |
| 341 if (chunks_[i].address() != NULL) DeleteChunk(i); | 296 ASSERT(size_ == 0); |
| 342 } | 297 // TODO(gc) this will be true again when we fix FreeMemory. |
| 343 chunks_.Clear(); | 298 // ASSERT(size_executable_ == 0); |
| 344 free_chunk_ids_.Clear(); | |
| 345 | |
| 346 if (initial_chunk_ != NULL) { | |
| 347 LOG(isolate_, DeleteEvent("InitialChunk", initial_chunk_->address())); | |
| 348 delete initial_chunk_; | |
| 349 initial_chunk_ = NULL; | |
| 350 } | |
| 351 | |
| 352 ASSERT(top_ == max_nof_chunks_); // all chunks are free | |
| 353 top_ = 0; | |
| 354 capacity_ = 0; | 299 capacity_ = 0; |
| 355 capacity_executable_ = 0; | 300 capacity_executable_ = 0; |
| 356 size_ = 0; | 301 } |
| 357 max_nof_chunks_ = 0; | 302 |
| 358 } | 303 |
| 359 | 304 void MemoryAllocator::FreeMemory(VirtualMemory* reservation, |
| 360 | 305 Executability executable) { |
| 361 void* MemoryAllocator::AllocateRawMemory(const size_t requested, | 306 // TODO(gc) make code_range part of memory allocator? |
| 362 size_t* allocated, | 307 ASSERT(reservation->IsReserved()); |
| 363 Executability executable) { | 308 size_t size = reservation->size(); |
| 364 if (size_ + static_cast<size_t>(requested) > static_cast<size_t>(capacity_)) { | 309 ASSERT(size_ >= size); |
| 310 size_ -= size; |
| 311 |
| 312 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); |
| 313 |
| 314 if (executable == EXECUTABLE) { |
| 315 ASSERT(size_executable_ >= size); |
| 316 size_executable_ -= size; |
| 317 } |
| 318 // Code which is part of the code-range does not have its own VirtualMemory. |
| 319 ASSERT(!isolate_->code_range()->contains( |
| 320 static_cast<Address>(reservation->address()))); |
| 321 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); |
| 322 reservation->Release(); |
| 323 } |
| 324 |
| 325 |
| 326 void MemoryAllocator::FreeMemory(Address base, |
| 327 size_t size, |
| 328 Executability executable) { |
| 329 // TODO(gc) make code_range part of memory allocator? |
| 330 ASSERT(size_ >= size); |
| 331 size_ -= size; |
| 332 |
| 333 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); |
| 334 |
| 335 if (executable == EXECUTABLE) { |
| 336 ASSERT(size_executable_ >= size); |
| 337 size_executable_ -= size; |
| 338 } |
| 339 if (isolate_->code_range()->contains(static_cast<Address>(base))) { |
| 340 ASSERT(executable == EXECUTABLE); |
| 341 isolate_->code_range()->FreeRawMemory(base, size); |
| 342 } else { |
| 343 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); |
| 344 bool result = VirtualMemory::ReleaseRegion(base, size); |
| 345 USE(result); |
| 346 ASSERT(result); |
| 347 } |
| 348 } |
| 349 |
| 350 |
| 351 Address MemoryAllocator::ReserveAlignedMemory(size_t size, |
| 352 size_t alignment, |
| 353 VirtualMemory* controller) { |
| 354 VirtualMemory reservation(size, alignment); |
| 355 |
| 356 if (!reservation.IsReserved()) return NULL; |
| 357 size_ += reservation.size(); |
| 358 Address base = RoundUp(static_cast<Address>(reservation.address()), |
| 359 alignment); |
| 360 controller->TakeControl(&reservation); |
| 361 return base; |
| 362 } |
| 363 |
| 364 |
| 365 Address MemoryAllocator::AllocateAlignedMemory(size_t size, |
| 366 size_t alignment, |
| 367 Executability executable, |
| 368 VirtualMemory* controller) { |
| 369 VirtualMemory reservation; |
| 370 Address base = ReserveAlignedMemory(size, alignment, &reservation); |
| 371 if (base == NULL) return NULL; |
| 372 if (!reservation.Commit(base, |
| 373 size, |
| 374 executable == EXECUTABLE)) { |
| 365 return NULL; | 375 return NULL; |
| 366 } | 376 } |
| 367 | 377 controller->TakeControl(&reservation); |
| 368 void* mem; | 378 return base; |
| 379 } |
| 380 |
| 381 |
| 382 void Page::InitializeAsAnchor(PagedSpace* owner) { |
| 383 set_owner(owner); |
| 384 set_prev_page(this); |
| 385 set_next_page(this); |
| 386 } |
| 387 |
| 388 |
| 389 NewSpacePage* NewSpacePage::Initialize(Heap* heap, |
| 390 Address start, |
| 391 SemiSpace* semi_space) { |
| 392 MemoryChunk* chunk = MemoryChunk::Initialize(heap, |
| 393 start, |
| 394 Page::kPageSize, |
| 395 NOT_EXECUTABLE, |
| 396 semi_space); |
| 397 chunk->set_next_chunk(NULL); |
| 398 chunk->set_prev_chunk(NULL); |
| 399 chunk->initialize_scan_on_scavenge(true); |
| 400 bool in_to_space = (semi_space->id() != kFromSpace); |
| 401 chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE |
| 402 : MemoryChunk::IN_FROM_SPACE); |
| 403 ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE |
| 404 : MemoryChunk::IN_TO_SPACE)); |
| 405 NewSpacePage* page = static_cast<NewSpacePage*>(chunk); |
| 406 heap->incremental_marking()->SetNewSpacePageFlags(page); |
| 407 return page; |
| 408 } |
| 409 |
| 410 |
| 411 void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) { |
| 412 set_owner(semi_space); |
| 413 set_next_chunk(this); |
| 414 set_prev_chunk(this); |
| 415 // Flags marks this invalid page as not being in new-space. |
| 416 // All real new-space pages will be in new-space. |
| 417 SetFlags(0, ~0); |
| 418 } |
| 419 |
| 420 |
| 421 MemoryChunk* MemoryChunk::Initialize(Heap* heap, |
| 422 Address base, |
| 423 size_t size, |
| 424 Executability executable, |
| 425 Space* owner) { |
| 426 MemoryChunk* chunk = FromAddress(base); |
| 427 |
| 428 ASSERT(base == chunk->address()); |
| 429 |
| 430 chunk->heap_ = heap; |
| 431 chunk->size_ = size; |
| 432 chunk->flags_ = 0; |
| 433 chunk->set_owner(owner); |
| 434 chunk->InitializeReservedMemory(); |
| 435 chunk->slots_buffer_ = NULL; |
| 436 chunk->skip_list_ = NULL; |
| 437 chunk->ResetLiveBytes(); |
| 438 Bitmap::Clear(chunk); |
| 439 chunk->initialize_scan_on_scavenge(false); |
| 440 chunk->SetFlag(WAS_SWEPT_PRECISELY); |
| 441 |
| 442 ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset); |
| 443 ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset); |
| 444 |
| 445 if (executable == EXECUTABLE) chunk->SetFlag(IS_EXECUTABLE); |
| 446 |
| 447 if (owner == heap->old_data_space()) chunk->SetFlag(CONTAINS_ONLY_DATA); |
| 448 |
| 449 return chunk; |
| 450 } |
| 451 |
| 452 |
| 453 void MemoryChunk::InsertAfter(MemoryChunk* other) { |
| 454 next_chunk_ = other->next_chunk_; |
| 455 prev_chunk_ = other; |
| 456 other->next_chunk_->prev_chunk_ = this; |
| 457 other->next_chunk_ = this; |
| 458 } |
| 459 |
| 460 |
| 461 void MemoryChunk::Unlink() { |
| 462 if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) { |
| 463 heap_->decrement_scan_on_scavenge_pages(); |
| 464 ClearFlag(SCAN_ON_SCAVENGE); |
| 465 } |
| 466 next_chunk_->prev_chunk_ = prev_chunk_; |
| 467 prev_chunk_->next_chunk_ = next_chunk_; |
| 468 prev_chunk_ = NULL; |
| 469 next_chunk_ = NULL; |
| 470 } |
| 471 |
| 472 |
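The chunk code above relies on every MemoryChunk starting at an address aligned to MemoryChunk::kAlignment, which is what lets an interior address be mapped back to its chunk header by clearing the low bits; it is also why AdvanceToNextPage earlier masks cur_addr_ - 1 rather than the end pointer, since a one-past-the-end pointer can sit exactly on the next aligned boundary. A stand-alone sketch of that lookup, with an illustrative alignment value and a hypothetical helper name rather than the real Page::FromAddress declaration:

    #include <cstdint>
    #include <cstdio>

    const uintptr_t kAlignment = 1u << 20;  // illustrative, not V8's constant

    // Hypothetical helper mirroring the FromAddress idea: clear the
    // offset-within-chunk bits to recover the aligned chunk base.
    uintptr_t ChunkBaseOf(uintptr_t addr) {
      return addr & ~(kAlignment - 1);
    }

    int main() {
      uintptr_t chunk_base = 7 * kAlignment;         // some aligned chunk start
      uintptr_t interior = chunk_base + 0x1234;      // an object inside the chunk
      uintptr_t area_end = chunk_base + kAlignment;  // one-past-the-end pointer

      std::printf("interior -> %#zx\n", static_cast<size_t>(ChunkBaseOf(interior)));
      // Masking the end pointer itself would land on the *next* chunk,
      // hence the cur_addr_ - 1 in AdvanceToNextPage.
      std::printf("end - 1  -> %#zx\n", static_cast<size_t>(ChunkBaseOf(area_end - 1)));
      return 0;
    }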
| 473 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, |
| 474 Executability executable, |
| 475 Space* owner) { |
| 476 size_t chunk_size = MemoryChunk::kObjectStartOffset + body_size; |
| 477 Heap* heap = isolate_->heap(); |
| 478 Address base = NULL; |
| 479 VirtualMemory reservation; |
| 369 if (executable == EXECUTABLE) { | 480 if (executable == EXECUTABLE) { |
| 370 // Check executable memory limit. | 481 // Check executable memory limit. |
| 371 if (size_executable_ + requested > | 482 if (size_executable_ + chunk_size > capacity_executable_) { |
| 372 static_cast<size_t>(capacity_executable_)) { | |
| 373 LOG(isolate_, | 483 LOG(isolate_, |
| 374 StringEvent("MemoryAllocator::AllocateRawMemory", | 484 StringEvent("MemoryAllocator::AllocateRawMemory", |
| 375 "V8 Executable Allocation capacity exceeded")); | 485 "V8 Executable Allocation capacity exceeded")); |
| 376 return NULL; | 486 return NULL; |
| 377 } | 487 } |
| 488 |
| 378 // Allocate executable memory either from code range or from the | 489 // Allocate executable memory either from code range or from the |
| 379 // OS. | 490 // OS. |
| 380 if (isolate_->code_range()->exists()) { | 491 if (isolate_->code_range()->exists()) { |
| 381 mem = isolate_->code_range()->AllocateRawMemory(requested, allocated); | 492 base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size); |
| 493 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base), |
| 494 MemoryChunk::kAlignment)); |
| 495 if (base == NULL) return NULL; |
| 496 size_ += chunk_size; |
| 497 // Update executable memory size. |
| 498 size_executable_ += chunk_size; |
| 382 } else { | 499 } else { |
| 383 mem = OS::Allocate(requested, allocated, true); | 500 base = AllocateAlignedMemory(chunk_size, |
| 501 MemoryChunk::kAlignment, |
| 502 executable, |
| 503 &reservation); |
| 504 if (base == NULL) return NULL; |
| 505 // Update executable memory size. |
| 506 size_executable_ += reservation.size(); |
| 384 } | 507 } |
| 385 // Update executable memory size. | |
| 386 size_executable_ += static_cast<int>(*allocated); | |
| 387 } else { | 508 } else { |
| 388 mem = OS::Allocate(requested, allocated, false); | 509 base = AllocateAlignedMemory(chunk_size, |
| 389 } | 510 MemoryChunk::kAlignment, |
| 390 int alloced = static_cast<int>(*allocated); | 511 executable, |
| 391 size_ += alloced; | 512 &reservation); |
| 513 |
| 514 if (base == NULL) return NULL; |
| 515 } |
| 392 | 516 |
| 393 #ifdef DEBUG | 517 #ifdef DEBUG |
| 394 ZapBlock(reinterpret_cast<Address>(mem), alloced); | 518 ZapBlock(base, chunk_size); |
| 395 #endif | 519 #endif |
| 396 isolate_->counters()->memory_allocated()->Increment(alloced); | 520 isolate_->counters()->memory_allocated()-> |
| 397 return mem; | 521 Increment(static_cast<int>(chunk_size)); |
| 398 } | 522 |
| 399 | 523 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size)); |
| 400 | 524 if (owner != NULL) { |
| 401 void MemoryAllocator::FreeRawMemory(void* mem, | 525 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); |
| 402 size_t length, | 526 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size); |
| 527 } |
| 528 |
| 529 MemoryChunk* result = MemoryChunk::Initialize(heap, |
| 530 base, |
| 531 chunk_size, |
| 532 executable, |
| 533 owner); |
| 534 result->set_reserved_memory(&reservation); |
| 535 return result; |
| 536 } |
| 537 |
| 538 |
| 539 Page* MemoryAllocator::AllocatePage(PagedSpace* owner, |
| 403 Executability executable) { | 540 Executability executable) { |
| 404 #ifdef DEBUG | 541 MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, executable, owner); |
| 405 // Do not try to zap the guard page. | 542 |
| 406 size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0; | 543 if (chunk == NULL) return NULL; |
| 407 ZapBlock(reinterpret_cast<Address>(mem) + guard_size, length - guard_size); | 544 |
| 408 #endif | 545 return Page::Initialize(isolate_->heap(), chunk, executable, owner); |
| 409 if (isolate_->code_range()->contains(static_cast<Address>(mem))) { | 546 } |
| 410 isolate_->code_range()->FreeRawMemory(mem, length); | 547 |
| 548 |
| 549 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, |
| 550 Executability executable, |
| 551 Space* owner) { |
| 552 MemoryChunk* chunk = AllocateChunk(object_size, executable, owner); |
| 553 if (chunk == NULL) return NULL; |
| 554 return LargePage::Initialize(isolate_->heap(), chunk); |
| 555 } |
| 556 |
| 557 |
| 558 void MemoryAllocator::Free(MemoryChunk* chunk) { |
| 559 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); |
| 560 if (chunk->owner() != NULL) { |
| 561 ObjectSpace space = |
| 562 static_cast<ObjectSpace>(1 << chunk->owner()->identity()); |
| 563 PerformAllocationCallback(space, kAllocationActionFree, chunk->size()); |
| 564 } |
| 565 |
| 566 delete chunk->slots_buffer(); |
| 567 delete chunk->skip_list(); |
| 568 |
| 569 VirtualMemory* reservation = chunk->reserved_memory(); |
| 570 if (reservation->IsReserved()) { |
| 571 FreeMemory(reservation, chunk->executable()); |
| 411 } else { | 572 } else { |
| 412 OS::Free(mem, length); | 573 FreeMemory(chunk->address(), |
| 413 } | 574 chunk->size(), |
| 414 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(length)); | 575 chunk->executable()); |
| 415 size_ -= static_cast<int>(length); | 576 } |
| 416 if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length); | 577 } |
| 417 | 578 |
| 418 ASSERT(size_ >= 0); | 579 |
| 419 ASSERT(size_executable_ >= 0); | 580 bool MemoryAllocator::CommitBlock(Address start, |
| 420 } | 581 size_t size, |
| 421 | 582 Executability executable) { |
| 422 | 583 if (!VirtualMemory::CommitRegion(start, size, executable)) return false; |
| 423 void MemoryAllocator::PerformAllocationCallback(ObjectSpace space, | |
| 424 AllocationAction action, | |
| 425 size_t size) { | |
| 426 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { | |
| 427 MemoryAllocationCallbackRegistration registration = | |
| 428 memory_allocation_callbacks_[i]; | |
| 429 if ((registration.space & space) == space && | |
| 430 (registration.action & action) == action) | |
| 431 registration.callback(space, action, static_cast<int>(size)); | |
| 432 } | |
| 433 } | |
| 434 | |
| 435 | |
| 436 bool MemoryAllocator::MemoryAllocationCallbackRegistered( | |
| 437 MemoryAllocationCallback callback) { | |
| 438 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { | |
| 439 if (memory_allocation_callbacks_[i].callback == callback) return true; | |
| 440 } | |
| 441 return false; | |
| 442 } | |
| 443 | |
| 444 | |
| 445 void MemoryAllocator::AddMemoryAllocationCallback( | |
| 446 MemoryAllocationCallback callback, | |
| 447 ObjectSpace space, | |
| 448 AllocationAction action) { | |
| 449 ASSERT(callback != NULL); | |
| 450 MemoryAllocationCallbackRegistration registration(callback, space, action); | |
| 451 ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback)); | |
| 452 return memory_allocation_callbacks_.Add(registration); | |
| 453 } | |
| 454 | |
| 455 | |
| 456 void MemoryAllocator::RemoveMemoryAllocationCallback( | |
| 457 MemoryAllocationCallback callback) { | |
| 458 ASSERT(callback != NULL); | |
| 459 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { | |
| 460 if (memory_allocation_callbacks_[i].callback == callback) { | |
| 461 memory_allocation_callbacks_.Remove(i); | |
| 462 return; | |
| 463 } | |
| 464 } | |
| 465 UNREACHABLE(); | |
| 466 } | |
| 467 | |
| 468 void* MemoryAllocator::ReserveInitialChunk(const size_t requested) { | |
| 469 ASSERT(initial_chunk_ == NULL); | |
| 470 | |
| 471 initial_chunk_ = new VirtualMemory(requested); | |
| 472 CHECK(initial_chunk_ != NULL); | |
| 473 if (!initial_chunk_->IsReserved()) { | |
| 474 delete initial_chunk_; | |
| 475 initial_chunk_ = NULL; | |
| 476 return NULL; | |
| 477 } | |
| 478 | |
| 479 // We are sure that we have mapped a block of requested addresses. | |
| 480 ASSERT(initial_chunk_->size() == requested); | |
| 481 LOG(isolate_, | |
| 482 NewEvent("InitialChunk", initial_chunk_->address(), requested)); | |
| 483 size_ += static_cast<int>(requested); | |
| 484 return initial_chunk_->address(); | |
| 485 } | |
| 486 | |
| 487 | |
| 488 static int PagesInChunk(Address start, size_t size) { | |
| 489 // The first page starts on the first page-aligned address from start onward | |
| 490 // and the last page ends on the last page-aligned address before | |
| 491 // start+size. Page::kPageSize is a power of two so we can divide by | |
| 492 // shifting. | |
| 493 return static_cast<int>((RoundDown(start + size, Page::kPageSize) | |
| 494 - RoundUp(start, Page::kPageSize)) >> kPageSizeBits); | |
| 495 } | |
| 496 | |
| 497 | |
| 498 Page* MemoryAllocator::AllocatePages(int requested_pages, | |
| 499 int* allocated_pages, | |
| 500 PagedSpace* owner) { | |
| 501 if (requested_pages <= 0) return Page::FromAddress(NULL); | |
| 502 size_t chunk_size = requested_pages * Page::kPageSize; | |
| 503 | |
| 504 void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable()); | |
| 505 if (chunk == NULL) return Page::FromAddress(NULL); | |
| 506 LOG(isolate_, NewEvent("PagedChunk", chunk, chunk_size)); | |
| 507 | |
| 508 *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size); | |
| 509 | |
| 510 // We may 'lose' a page due to alignment. | |
| 511 ASSERT(*allocated_pages >= kPagesPerChunk - 1); | |
| 512 | |
| 513 size_t guard_size = (owner->executable() == EXECUTABLE) ? Page::kPageSize : 0; | |
| 514 | |
| 515 // Check that we got at least one page that we can use. | |
| 516 if (*allocated_pages <= ((guard_size != 0) ? 1 : 0)) { | |
| 517 FreeRawMemory(chunk, | |
| 518 chunk_size, | |
| 519 owner->executable()); | |
| 520 LOG(isolate_, DeleteEvent("PagedChunk", chunk)); | |
| 521 return Page::FromAddress(NULL); | |
| 522 } | |
| 523 | |
| 524 if (guard_size != 0) { | |
| 525 OS::Guard(chunk, guard_size); | |
| 526 chunk_size -= guard_size; | |
| 527 chunk = static_cast<Address>(chunk) + guard_size; | |
| 528 --*allocated_pages; | |
| 529 } | |
| 530 | |
| 531 int chunk_id = Pop(); | |
| 532 chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner); | |
| 533 | |
| 534 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); | |
| 535 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size); | |
| 536 Page* new_pages = InitializePagesInChunk(chunk_id, *allocated_pages, owner); | |
| 537 | |
| 538 return new_pages; | |
| 539 } | |
| 540 | |
| 541 | |
| 542 Page* MemoryAllocator::CommitPages(Address start, size_t size, | |
| 543 PagedSpace* owner, int* num_pages) { | |
| 544 ASSERT(start != NULL); | |
| 545 *num_pages = PagesInChunk(start, size); | |
| 546 ASSERT(*num_pages > 0); | |
| 547 ASSERT(initial_chunk_ != NULL); | |
| 548 ASSERT(InInitialChunk(start)); | |
| 549 ASSERT(InInitialChunk(start + size - 1)); | |
| 550 if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) { | |
| 551 return Page::FromAddress(NULL); | |
| 552 } | |
| 553 #ifdef DEBUG | 584 #ifdef DEBUG |
| 554 ZapBlock(start, size); | 585 ZapBlock(start, size); |
| 555 #endif | 586 #endif |
| 556 isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size)); | |
| 557 | |
| 558 // So long as we correctly overestimated the number of chunks we should not | |
| 559 // run out of chunk ids. | |
| 560 CHECK(!OutOfChunkIds()); | |
| 561 int chunk_id = Pop(); | |
| 562 chunks_[chunk_id].init(start, size, owner); | |
| 563 return InitializePagesInChunk(chunk_id, *num_pages, owner); | |
| 564 } | |
| 565 | |
| 566 | |
| 567 bool MemoryAllocator::CommitBlock(Address start, | |
| 568 size_t size, | |
| 569 Executability executable) { | |
| 570 ASSERT(start != NULL); | |
| 571 ASSERT(size > 0); | |
| 572 ASSERT(initial_chunk_ != NULL); | |
| 573 ASSERT(InInitialChunk(start)); | |
| 574 ASSERT(InInitialChunk(start + size - 1)); | |
| 575 | |
| 576 if (!initial_chunk_->Commit(start, size, executable)) return false; | |
| 577 #ifdef DEBUG | |
| 578 ZapBlock(start, size); | |
| 579 #endif | |
| 580 isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size)); | 587 isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size)); |
| 581 return true; | 588 return true; |
| 582 } | 589 } |
| 583 | 590 |
| 584 | 591 |
| 585 bool MemoryAllocator::UncommitBlock(Address start, size_t size) { | 592 bool MemoryAllocator::UncommitBlock(Address start, size_t size) { |
| 586 ASSERT(start != NULL); | 593 if (!VirtualMemory::UncommitRegion(start, size)) return false; |
| 587 ASSERT(size > 0); | |
| 588 ASSERT(initial_chunk_ != NULL); | |
| 589 ASSERT(InInitialChunk(start)); | |
| 590 ASSERT(InInitialChunk(start + size - 1)); | |
| 591 | |
| 592 if (!initial_chunk_->Uncommit(start, size)) return false; | |
| 593 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); | 594 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); |
| 594 return true; | 595 return true; |
| 595 } | 596 } |
| 596 | 597 |
| 597 | 598 |
| 598 void MemoryAllocator::ZapBlock(Address start, size_t size) { | 599 void MemoryAllocator::ZapBlock(Address start, size_t size) { |
| 599 for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) { | 600 for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) { |
| 600 Memory::Address_at(start + s) = kZapValue; | 601 Memory::Address_at(start + s) = kZapValue; |
| 601 } | 602 } |
| 602 } | 603 } |
| 603 | 604 |
| 604 | 605 |
| 605 Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk, | 606 void MemoryAllocator::PerformAllocationCallback(ObjectSpace space, |
| 606 PagedSpace* owner) { | 607 AllocationAction action, |
| 607 ASSERT(IsValidChunk(chunk_id)); | 608 size_t size) { |
| 608 ASSERT(pages_in_chunk > 0); | 609 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { |
| 609 | 610 MemoryAllocationCallbackRegistration registration = |
| 610 Address chunk_start = chunks_[chunk_id].address(); | 611 memory_allocation_callbacks_[i]; |
| 611 | 612 if ((registration.space & space) == space && |
| 612 Address low = RoundUp(chunk_start, Page::kPageSize); | 613 (registration.action & action) == action) |
| 613 | 614 registration.callback(space, action, static_cast<int>(size)); |
| 614 #ifdef DEBUG | |
| 615 size_t chunk_size = chunks_[chunk_id].size(); | |
| 616 Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize); | |
| 617 ASSERT(pages_in_chunk <= | |
| 618 ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize)); | |
| 619 #endif | |
| 620 | |
| 621 Address page_addr = low; | |
| 622 for (int i = 0; i < pages_in_chunk; i++) { | |
| 623 Page* p = Page::FromAddress(page_addr); | |
| 624 p->heap_ = owner->heap(); | |
| 625 p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id; | |
| 626 p->InvalidateWatermark(true); | |
| 627 p->SetIsLargeObjectPage(false); | |
| 628 p->SetAllocationWatermark(p->ObjectAreaStart()); | |
| 629 p->SetCachedAllocationWatermark(p->ObjectAreaStart()); | |
| 630 page_addr += Page::kPageSize; | |
| 631 } | |
| 632 | |
| 633 // Set the next page of the last page to 0. | |
| 634 Page* last_page = Page::FromAddress(page_addr - Page::kPageSize); | |
| 635 last_page->opaque_header = OffsetFrom(0) | chunk_id; | |
| 636 | |
| 637 return Page::FromAddress(low); | |
| 638 } | |
| 639 | |
| 640 | |
| 641 Page* MemoryAllocator::FreePages(Page* p) { | |
| 642 if (!p->is_valid()) return p; | |
| 643 | |
| 644 // Find the first page in the same chunk as 'p' | |
| 645 Page* first_page = FindFirstPageInSameChunk(p); | |
| 646 Page* page_to_return = Page::FromAddress(NULL); | |
| 647 | |
| 648 if (p != first_page) { | |
| 649 // Find the last page in the same chunk as 'prev'. | |
| 650 Page* last_page = FindLastPageInSameChunk(p); | |
| 651 first_page = GetNextPage(last_page); // first page in next chunk | |
| 652 | |
| 653 // set the next_page of last_page to NULL | |
| 654 SetNextPage(last_page, Page::FromAddress(NULL)); | |
| 655 page_to_return = p; // return 'p' when exiting | |
| 656 } | |
| 657 | |
| 658 while (first_page->is_valid()) { | |
| 659 int chunk_id = GetChunkId(first_page); | |
| 660 ASSERT(IsValidChunk(chunk_id)); | |
| 661 | |
| 662 // Find the first page of the next chunk before deleting this chunk. | |
| 663 first_page = GetNextPage(FindLastPageInSameChunk(first_page)); | |
| 664 | |
| 665 // Free the current chunk. | |
| 666 DeleteChunk(chunk_id); | |
| 667 } | |
| 668 | |
| 669 return page_to_return; | |
| 670 } | |
| 671 | |
| 672 | |
| 673 void MemoryAllocator::FreeAllPages(PagedSpace* space) { | |
| 674 for (int i = 0, length = chunks_.length(); i < length; i++) { | |
| 675 if (chunks_[i].owner() == space) { | |
| 676 DeleteChunk(i); | |
| 677 } | |
| 678 } | 615 } |
| 679 } | 616 } |
| 680 | 617 |
| 681 | 618 |
| 682 void MemoryAllocator::DeleteChunk(int chunk_id) { | 619 bool MemoryAllocator::MemoryAllocationCallbackRegistered( |
| 683 ASSERT(IsValidChunk(chunk_id)); | 620 MemoryAllocationCallback callback) { |
| 684 | 621 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { |
| 685 ChunkInfo& c = chunks_[chunk_id]; | 622 if (memory_allocation_callbacks_[i].callback == callback) return true; |
| 686 | |
| 687 // We cannot free a chunk contained in the initial chunk because it was not | |
| 688 // allocated with AllocateRawMemory. Instead we uncommit the virtual | |
| 689 // memory. | |
| 690 if (InInitialChunk(c.address())) { | |
| 691 // TODO(1240712): VirtualMemory::Uncommit has a return value which | |
| 692 // is ignored here. | |
| 693 initial_chunk_->Uncommit(c.address(), c.size()); | |
| 694 Counters* counters = isolate_->counters(); | |
| 695 counters->memory_allocated()->Decrement(static_cast<int>(c.size())); | |
| 696 } else { | |
| 697 LOG(isolate_, DeleteEvent("PagedChunk", c.address())); | |
| 698 ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner_identity()); | |
| 699 size_t size = c.size(); | |
| 700 size_t guard_size = (c.executable() == EXECUTABLE) ? Page::kPageSize : 0; | |
| 701 FreeRawMemory(c.address() - guard_size, size + guard_size, c.executable()); | |
| 702 PerformAllocationCallback(space, kAllocationActionFree, size); | |
| 703 } | 623 } |
| 704 c.init(NULL, 0, NULL); | 624 return false; |
| 705 Push(chunk_id); | |
| 706 } | 625 } |
| 707 | 626 |
| 708 | 627 |
| 709 Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) { | 628 void MemoryAllocator::AddMemoryAllocationCallback( |
| 710 int chunk_id = GetChunkId(p); | 629 MemoryAllocationCallback callback, |
| 711 ASSERT(IsValidChunk(chunk_id)); | 630 ObjectSpace space, |
| 712 | 631 AllocationAction action) { |
| 713 Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize); | 632 ASSERT(callback != NULL); |
| 714 return Page::FromAddress(low); | 633 MemoryAllocationCallbackRegistration registration(callback, space, action); |
| 634 ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback)); |
| 635 return memory_allocation_callbacks_.Add(registration); |
| 715 } | 636 } |
| 716 | 637 |
| 717 | 638 |
| 718 Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) { | 639 void MemoryAllocator::RemoveMemoryAllocationCallback( |
| 719 int chunk_id = GetChunkId(p); | 640 MemoryAllocationCallback callback) { |
| 720 ASSERT(IsValidChunk(chunk_id)); | 641 ASSERT(callback != NULL); |
| 721 | 642 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { |
| 722 Address chunk_start = chunks_[chunk_id].address(); | 643 if (memory_allocation_callbacks_[i].callback == callback) { |
| 723 size_t chunk_size = chunks_[chunk_id].size(); | 644 memory_allocation_callbacks_.Remove(i); |
| 724 | 645 return; |
| 725 Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize); | 646 } |
| 726 ASSERT(chunk_start <= p->address() && p->address() < high); | 647 } |
| 727 | 648 UNREACHABLE(); |
| 728 return Page::FromAddress(high - Page::kPageSize); | |
| 729 } | 649 } |
| 730 | 650 |
| 731 | 651 |
| 732 #ifdef DEBUG | 652 #ifdef DEBUG |
| 733 void MemoryAllocator::ReportStatistics() { | 653 void MemoryAllocator::ReportStatistics() { |
| 734 float pct = static_cast<float>(capacity_ - size_) / capacity_; | 654 float pct = static_cast<float>(capacity_ - size_) / capacity_; |
| 735 PrintF(" capacity: %" V8_PTR_PREFIX "d" | 655 PrintF(" capacity: %" V8_PTR_PREFIX "d" |
| 736 ", used: %" V8_PTR_PREFIX "d" | 656 ", used: %" V8_PTR_PREFIX "d" |
| 737 ", available: %%%d\n\n", | 657 ", available: %%%d\n\n", |
| 738 capacity_, size_, static_cast<int>(pct*100)); | 658 capacity_, size_, static_cast<int>(pct*100)); |
| 739 } | 659 } |
| 740 #endif | 660 #endif |
| 741 | 661 |
| 742 | |
| 743 void MemoryAllocator::RelinkPageListInChunkOrder(PagedSpace* space, | |
| 744 Page** first_page, | |
| 745 Page** last_page, | |
| 746 Page** last_page_in_use) { | |
| 747 Page* first = NULL; | |
| 748 Page* last = NULL; | |
| 749 | |
| 750 for (int i = 0, length = chunks_.length(); i < length; i++) { | |
| 751 ChunkInfo& chunk = chunks_[i]; | |
| 752 | |
| 753 if (chunk.owner() == space) { | |
| 754 if (first == NULL) { | |
| 755 Address low = RoundUp(chunk.address(), Page::kPageSize); | |
| 756 first = Page::FromAddress(low); | |
| 757 } | |
| 758 last = RelinkPagesInChunk(i, | |
| 759 chunk.address(), | |
| 760 chunk.size(), | |
| 761 last, | |
| 762 last_page_in_use); | |
| 763 } | |
| 764 } | |
| 765 | |
| 766 if (first_page != NULL) { | |
| 767 *first_page = first; | |
| 768 } | |
| 769 | |
| 770 if (last_page != NULL) { | |
| 771 *last_page = last; | |
| 772 } | |
| 773 } | |
| 774 | |
| 775 | |
| 776 Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id, | |
| 777 Address chunk_start, | |
| 778 size_t chunk_size, | |
| 779 Page* prev, | |
| 780 Page** last_page_in_use) { | |
| 781 Address page_addr = RoundUp(chunk_start, Page::kPageSize); | |
| 782 int pages_in_chunk = PagesInChunk(chunk_start, chunk_size); | |
| 783 | |
| 784 if (prev->is_valid()) { | |
| 785 SetNextPage(prev, Page::FromAddress(page_addr)); | |
| 786 } | |
| 787 | |
| 788 for (int i = 0; i < pages_in_chunk; i++) { | |
| 789 Page* p = Page::FromAddress(page_addr); | |
| 790 p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id; | |
| 791 page_addr += Page::kPageSize; | |
| 792 | |
| 793 p->InvalidateWatermark(true); | |
| 794 if (p->WasInUseBeforeMC()) { | |
| 795 *last_page_in_use = p; | |
| 796 } | |
| 797 } | |
| 798 | |
| 799 // Set the next page of the last page to 0. | |
| 800 Page* last_page = Page::FromAddress(page_addr - Page::kPageSize); | |
| 801 last_page->opaque_header = OffsetFrom(0) | chunk_id; | |
| 802 | |
| 803 if (last_page->WasInUseBeforeMC()) { | |
| 804 *last_page_in_use = last_page; | |
| 805 } | |
| 806 | |
| 807 return last_page; | |
| 808 } | |
| 809 | |
| 810 | |
| 811 // ----------------------------------------------------------------------------- | 662 // ----------------------------------------------------------------------------- |
| 812 // PagedSpace implementation | 663 // PagedSpace implementation |
| 813 | 664 |
| 814 PagedSpace::PagedSpace(Heap* heap, | 665 PagedSpace::PagedSpace(Heap* heap, |
| 815 intptr_t max_capacity, | 666 intptr_t max_capacity, |
| 816 AllocationSpace id, | 667 AllocationSpace id, |
| 817 Executability executable) | 668 Executability executable) |
| 818 : Space(heap, id, executable) { | 669 : Space(heap, id, executable), |
| 670 free_list_(this), |
| 671 was_swept_conservatively_(false), |
| 672 first_unswept_page_(Page::FromAddress(NULL)), |
| 673 last_unswept_page_(Page::FromAddress(NULL)) { |
| 819 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) | 674 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) |
| 820 * Page::kObjectAreaSize; | 675 * Page::kObjectAreaSize; |
| 821 accounting_stats_.Clear(); | 676 accounting_stats_.Clear(); |
| 822 | 677 |
| 823 allocation_info_.top = NULL; | 678 allocation_info_.top = NULL; |
| 824 allocation_info_.limit = NULL; | 679 allocation_info_.limit = NULL; |
| 825 | 680 |
| 826 mc_forwarding_info_.top = NULL; | 681 anchor_.InitializeAsAnchor(this); |
| 827 mc_forwarding_info_.limit = NULL; | |
| 828 } | 682 } |
| 829 | 683 |
| 830 | 684 |
| 831 bool PagedSpace::Setup(Address start, size_t size) { | 685 bool PagedSpace::Setup() { |
| 832 if (HasBeenSetup()) return false; | |
| 833 | |
| 834 int num_pages = 0; | |
| 835 // Try to use the virtual memory range passed to us. If it is too small to | |
| 836 // contain at least one page, ignore it and allocate instead. | |
| 837 int pages_in_chunk = PagesInChunk(start, size); | |
| 838 if (pages_in_chunk > 0) { | |
| 839 first_page_ = Isolate::Current()->memory_allocator()->CommitPages( | |
| 840 RoundUp(start, Page::kPageSize), | |
| 841 Page::kPageSize * pages_in_chunk, | |
| 842 this, &num_pages); | |
| 843 } else { | |
| 844 int requested_pages = | |
| 845 Min(MemoryAllocator::kPagesPerChunk, | |
| 846 static_cast<int>(max_capacity_ / Page::kObjectAreaSize)); | |
| 847 first_page_ = | |
| 848 Isolate::Current()->memory_allocator()->AllocatePages( | |
| 849 requested_pages, &num_pages, this); | |
| 850 if (!first_page_->is_valid()) return false; | |
| 851 } | |
| 852 | |
| 853 // We are sure that the first page is valid and that we have at least one | |
| 854 // page. | |
| 855 ASSERT(first_page_->is_valid()); | |
| 856 ASSERT(num_pages > 0); | |
| 857 accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize); | |
| 858 ASSERT(Capacity() <= max_capacity_); | |
| 859 | |
| 860 // Sequentially clear region marks in the newly allocated | |
| 861 // pages and cache the current last page in the space. | |
| 862 for (Page* p = first_page_; p->is_valid(); p = p->next_page()) { | |
| 863 p->SetRegionMarks(Page::kAllRegionsCleanMarks); | |
| 864 last_page_ = p; | |
| 865 } | |
| 866 | |
| 867 // Use first_page_ for allocation. | |
| 868 SetAllocationInfo(&allocation_info_, first_page_); | |
| 869 | |
| 870 page_list_is_chunk_ordered_ = true; | |
| 871 | |
| 872 return true; | 686 return true; |
| 873 } | 687 } |
| 874 | 688 |
| 875 | 689 |
| 876 bool PagedSpace::HasBeenSetup() { | 690 bool PagedSpace::HasBeenSetup() { |
| 877 return (Capacity() > 0); | 691 return true; |
| 878 } | 692 } |
| 879 | 693 |
| 880 | 694 |
| 881 void PagedSpace::TearDown() { | 695 void PagedSpace::TearDown() { |
| 882 Isolate::Current()->memory_allocator()->FreeAllPages(this); | 696 PageIterator iterator(this); |
| 883 first_page_ = NULL; | 697 while (iterator.has_next()) { |
| 698 heap()->isolate()->memory_allocator()->Free(iterator.next()); |
| 699 } |
| 700 anchor_.set_next_page(&anchor_); |
| 701 anchor_.set_prev_page(&anchor_); |
| 884 accounting_stats_.Clear(); | 702 accounting_stats_.Clear(); |
| 885 } | 703 } |
| 886 | 704 |
| 887 | 705 |
| 888 void PagedSpace::MarkAllPagesClean() { | |
| 889 PageIterator it(this, PageIterator::ALL_PAGES); | |
| 890 while (it.has_next()) { | |
| 891 it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks); | |
| 892 } | |
| 893 } | |
| 894 | |
| 895 | |
| 896 MaybeObject* PagedSpace::FindObject(Address addr) { | 706 MaybeObject* PagedSpace::FindObject(Address addr) { |
| 897 // Note: this function can only be called before or after mark-compact GC | 707 // Note: this function can only be called on precisely swept spaces. |
| 898 // because it accesses map pointers. | |
| 899 ASSERT(!heap()->mark_compact_collector()->in_use()); | 708 ASSERT(!heap()->mark_compact_collector()->in_use()); |
| 900 | 709 |
| 901 if (!Contains(addr)) return Failure::Exception(); | 710 if (!Contains(addr)) return Failure::Exception(); |
| 902 | 711 |
| 903 Page* p = Page::FromAddress(addr); | 712 Page* p = Page::FromAddress(addr); |
| 904 ASSERT(IsUsed(p)); | 713 HeapObjectIterator it(p, NULL); |
| 905 Address cur = p->ObjectAreaStart(); | 714 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { |
| 906 Address end = p->AllocationTop(); | 715 Address cur = obj->address(); |
| 907 while (cur < end) { | |
| 908 HeapObject* obj = HeapObject::FromAddress(cur); | |
| 909 Address next = cur + obj->Size(); | 716 Address next = cur + obj->Size(); |
| 910 if ((cur <= addr) && (addr < next)) return obj; | 717 if ((cur <= addr) && (addr < next)) return obj; |
| 911 cur = next; | |
| 912 } | 718 } |
| 913 | 719 |
| 914 UNREACHABLE(); | 720 UNREACHABLE(); |
| 915 return Failure::Exception(); | 721 return Failure::Exception(); |
| 916 } | 722 } |
| 917 | 723 |
| 918 | 724 bool PagedSpace::CanExpand() { |
| 919 bool PagedSpace::IsUsed(Page* page) { | |
| 920 PageIterator it(this, PageIterator::PAGES_IN_USE); | |
| 921 while (it.has_next()) { | |
| 922 if (page == it.next()) return true; | |
| 923 } | |
| 924 return false; | |
| 925 } | |
| 926 | |
| 927 | |
| 928 void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) { | |
| 929 alloc_info->top = p->ObjectAreaStart(); | |
| 930 alloc_info->limit = p->ObjectAreaEnd(); | |
| 931 ASSERT(alloc_info->VerifyPagedAllocation()); | |
| 932 } | |
| 933 | |
| 934 | |
| 935 void PagedSpace::MCResetRelocationInfo() { | |
| 936 // Set page indexes. | |
| 937 int i = 0; | |
| 938 PageIterator it(this, PageIterator::ALL_PAGES); | |
| 939 while (it.has_next()) { | |
| 940 Page* p = it.next(); | |
| 941 p->mc_page_index = i++; | |
| 942 } | |
| 943 | |
| 944 // Set mc_forwarding_info_ to the first page in the space. | |
| 945 SetAllocationInfo(&mc_forwarding_info_, first_page_); | |
| 946 // All the bytes in the space are 'available'. We will rediscover | |
| 947 // allocated and wasted bytes during GC. | |
| 948 accounting_stats_.Reset(); | |
| 949 } | |
| 950 | |
| 951 | |
| 952 int PagedSpace::MCSpaceOffsetForAddress(Address addr) { | |
| 953 #ifdef DEBUG | |
| 954 // The Contains function considers the address at the beginning of a | |
| 955 // page in the page, MCSpaceOffsetForAddress considers it is in the | |
| 956 // previous page. | |
| 957 if (Page::IsAlignedToPageSize(addr)) { | |
| 958 ASSERT(Contains(addr - kPointerSize)); | |
| 959 } else { | |
| 960 ASSERT(Contains(addr)); | |
| 961 } | |
| 962 #endif | |
| 963 | |
| 964 // If addr is at the end of a page, it belongs to previous page | |
| 965 Page* p = Page::IsAlignedToPageSize(addr) | |
| 966 ? Page::FromAllocationTop(addr) | |
| 967 : Page::FromAddress(addr); | |
| 968 int index = p->mc_page_index; | |
| 969 return (index * Page::kPageSize) + p->Offset(addr); | |
| 970 } | |
| 971 | |
| 972 | |
| 973 // Slow case for reallocating and promoting objects during a compacting | |
| 974 // collection. This function is not space-specific. | |
| 975 HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) { | |
| 976 Page* current_page = TopPageOf(mc_forwarding_info_); | |
| 977 if (!current_page->next_page()->is_valid()) { | |
| 978 if (!Expand(current_page)) { | |
| 979 return NULL; | |
| 980 } | |
| 981 } | |
| 982 | |
| 983 // There are surely more pages in the space now. | |
| 984 ASSERT(current_page->next_page()->is_valid()); | |
| 985 // We do not add the top of page block for current page to the space's | |
| 986 // free list---the block may contain live objects so we cannot write | |
| 987 // bookkeeping information to it. Instead, we will recover top of page | |
| 988 // blocks when we move objects to their new locations. | |
| 989 // | |
| 990 // We do however write the allocation pointer to the page. The encoding | |
| 991 // of forwarding addresses is as an offset in terms of live bytes, so we | |
| 992 // need quick access to the allocation top of each page to decode | |
| 993 // forwarding addresses. | |
| 994 current_page->SetAllocationWatermark(mc_forwarding_info_.top); | |
| 995 current_page->next_page()->InvalidateWatermark(true); | |
| 996 SetAllocationInfo(&mc_forwarding_info_, current_page->next_page()); | |
| 997 return AllocateLinearly(&mc_forwarding_info_, size_in_bytes); | |
| 998 } | |
| 999 | |
| 1000 | |
| 1001 bool PagedSpace::Expand(Page* last_page) { | |
| 1002 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0); | 725 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0); |
| 1003 ASSERT(Capacity() % Page::kObjectAreaSize == 0); | 726 ASSERT(Capacity() % Page::kObjectAreaSize == 0); |
| 1004 | 727 |
| 1005 if (Capacity() == max_capacity_) return false; | 728 if (Capacity() == max_capacity_) return false; |
| 1006 | 729 |
| 1007 ASSERT(Capacity() < max_capacity_); | 730 ASSERT(Capacity() < max_capacity_); |
| 1008 // Last page must be valid and its next page is invalid. | |
| 1009 ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid()); | |
| 1010 | 731 |
| 1011 int available_pages = | 732 // Are we going to exceed capacity for this space? |
| 1012 static_cast<int>((max_capacity_ - Capacity()) / Page::kObjectAreaSize); | 733 if ((Capacity() + Page::kPageSize) > max_capacity_) return false; |
| 1013 // We don't want to have to handle small chunks near the end so if there are | |
| 1014 // not kPagesPerChunk pages available without exceeding the max capacity then | |
| 1015 // act as if memory has run out. | |
| 1016 if (available_pages < MemoryAllocator::kPagesPerChunk) return false; | |
| 1017 | 734 |
| 1018 int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk); | 735 return true; |
| 1019 Page* p = heap()->isolate()->memory_allocator()->AllocatePages( | 736 } |
| 1020 desired_pages, &desired_pages, this); | |
| 1021 if (!p->is_valid()) return false; | |
| 1022 | 737 |
| 1023 accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize); | 738 bool PagedSpace::Expand() { |
| 739 if (!CanExpand()) return false; |
| 740 |
| 741 Page* p = heap()->isolate()->memory_allocator()-> |
| 742 AllocatePage(this, executable()); |
| 743 if (p == NULL) return false; |
| 744 |
| 1024 ASSERT(Capacity() <= max_capacity_); | 745 ASSERT(Capacity() <= max_capacity_); |
| 1025 | 746 |
| 1026 heap()->isolate()->memory_allocator()->SetNextPage(last_page, p); | 747 p->InsertAfter(anchor_.prev_page()); |
| 1027 | |
| 1028 // Sequentially clear region marks of new pages and cache the | |
| 1029 // new last page in the space. | |
| 1030 while (p->is_valid()) { | |
| 1031 p->SetRegionMarks(Page::kAllRegionsCleanMarks); | |
| 1032 last_page_ = p; | |
| 1033 p = p->next_page(); | |
| 1034 } | |
| 1035 | 748 |
| 1036 return true; | 749 return true; |
| 1037 } | 750 } |
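The new Expand() links the freshly allocated page in with p->InsertAfter(anchor_.prev_page()), i.e. it appends to a circular doubly linked list whose sentinel is the anchor. A small sketch of that insertion under those assumptions (hypothetical Node type, not V8's Page API):

    // Circular doubly linked list with a sentinel ("anchor") node.
    struct Node {
      Node* prev;
      Node* next;
      Node() : prev(this), next(this) {}  // empty list: sentinel points at itself
    };

    // Insert n immediately after pos; passing the anchor's prev appends at the tail.
    void InsertAfter(Node* n, Node* pos) {
      n->prev = pos;
      n->next = pos->next;
      pos->next->prev = n;
      pos->next = n;
    }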
| 1038 | 751 |
| 1039 | 752 |
| 1040 #ifdef DEBUG | 753 #ifdef DEBUG |
| 1041 int PagedSpace::CountTotalPages() { | 754 int PagedSpace::CountTotalPages() { |
| 755 PageIterator it(this); |
| 1042 int count = 0; | 756 int count = 0; |
| 1043 for (Page* p = first_page_; p->is_valid(); p = p->next_page()) { | 757 while (it.has_next()) { |
| 758 it.next(); |
| 1044 count++; | 759 count++; |
| 1045 } | 760 } |
| 1046 return count; | 761 return count; |
| 1047 } | 762 } |
| 1048 #endif | 763 #endif |
| 1049 | 764 |
| 1050 | 765 |
| 1051 void PagedSpace::Shrink() { | 766 void PagedSpace::ReleasePage(Page* page) { |
| 1052 if (!page_list_is_chunk_ordered_) { | 767 ASSERT(page->LiveBytes() == 0); |
| 1053 // We can't shrink the space if the page list is not chunk-ordered | |
| 1054 // (see comment for class MemoryAllocator for definition). | 769 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) { |
| 1055 return; | 770 heap()->isolate()->memory_allocator()->Free(page); |
| 771 } else { |
| 772 heap()->QueueMemoryChunkForFree(page); |
| 1056 } | 773 } |
| 1057 | 774 |
| 1058 // Release half of free pages. | 775 ASSERT(Capacity() > 0); |
| 1059 Page* top_page = AllocationTopPage(); | 776 ASSERT(Capacity() % Page::kObjectAreaSize == 0); |
| 1060 ASSERT(top_page->is_valid()); | 777 accounting_stats_.ShrinkSpace(Page::kObjectAreaSize); |
| 1061 | |
| 1062 // Count the number of pages we would like to free. | |
| 1063 int pages_to_free = 0; | |
| 1064 for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) { | |
| 1065 pages_to_free++; | |
| 1066 } | |
| 1067 | |
| 1068 // Free pages after top_page. | |
| 1069 Page* p = heap()->isolate()->memory_allocator()-> | |
| 1070 FreePages(top_page->next_page()); | |
| 1071 heap()->isolate()->memory_allocator()->SetNextPage(top_page, p); | |
| 1072 | |
| 1073 // Find out how many pages we failed to free and update last_page_. | |
| 1074 // Please note pages can only be freed in whole chunks. | |
| 1075 last_page_ = top_page; | |
| 1076 for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) { | |
| 1077 pages_to_free--; | |
| 1078 last_page_ = p; | |
| 1079 } | |
| 1080 | |
| 1081 accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize); | |
| 1082 ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize); | |
| 1083 } | 778 } |
| 1084 | 779 |
| 1085 | 780 |
| 1086 bool PagedSpace::EnsureCapacity(int capacity) { | 781 void PagedSpace::ReleaseAllUnusedPages() { |
| 1087 if (Capacity() >= capacity) return true; | 782 PageIterator it(this); |
| 1088 | 783 while (it.has_next()) { |
| 1089 // Start from the allocation top and loop to the last page in the space. | 784 Page* page = it.next(); |
| 1090 Page* last_page = AllocationTopPage(); | 785 if (page->LiveBytes() == 0) { |
| 1091 Page* next_page = last_page->next_page(); | 786 ReleasePage(page); |
| 1092 while (next_page->is_valid()) { | 787 } |
| 1093 last_page = heap()->isolate()->memory_allocator()-> | |
| 1094 FindLastPageInSameChunk(next_page); | |
| 1095 next_page = last_page->next_page(); | |
| 1096 } | 788 } |
| 1097 | 789 heap()->FreeQueuedChunks(); |
| 1098 // Expand the space until it has the required capacity or expansion fails. | |
| 1099 do { | |
| 1100 if (!Expand(last_page)) return false; | |
| 1101 ASSERT(last_page->next_page()->is_valid()); | |
| 1102 last_page = | |
| 1103 heap()->isolate()->memory_allocator()->FindLastPageInSameChunk( | |
| 1104 last_page->next_page()); | |
| 1105 } while (Capacity() < capacity); | |
| 1106 | |
| 1107 return true; | |
| 1108 } | 790 } |
| 1109 | 791 |
| 1110 | 792 |
| 1111 #ifdef DEBUG | 793 #ifdef DEBUG |
| 1112 void PagedSpace::Print() { } | 794 void PagedSpace::Print() { } |
| 1113 #endif | 795 #endif |
| 1114 | 796 |
| 1115 | 797 |
| 1116 #ifdef DEBUG | 798 #ifdef DEBUG |
| 1117 // We do not assume that the PageIterator works, because it depends on the | |
| 1118 // invariants we are checking during verification. | |
| 1119 void PagedSpace::Verify(ObjectVisitor* visitor) { | 799 void PagedSpace::Verify(ObjectVisitor* visitor) { |
| 1120 // The allocation pointer should be valid, and it should be in a page in the | 800 // We can only iterate over the pages if they were swept precisely. |
| 1121 // space. | 801 if (was_swept_conservatively_) return; |
| 1122 ASSERT(allocation_info_.VerifyPagedAllocation()); | |
| 1123 Page* top_page = Page::FromAllocationTop(allocation_info_.top); | |
| 1124 ASSERT(heap()->isolate()->memory_allocator()->IsPageInSpace(top_page, this)); | |
| 1125 | 802 |
| 1126 // Loop over all the pages. | 803 bool allocation_pointer_found_in_space = |
| 1127 bool above_allocation_top = false; | 804 (allocation_info_.top == allocation_info_.limit); |
| 1128 Page* current_page = first_page_; | 805 PageIterator page_iterator(this); |
| 1129 while (current_page->is_valid()) { | 806 while (page_iterator.has_next()) { |
| 1130 if (above_allocation_top) { | 807 Page* page = page_iterator.next(); |
| 1131 // We don't care what's above the allocation top. | 808 ASSERT(page->owner() == this); |
| 1132 } else { | 809 if (page == Page::FromAllocationTop(allocation_info_.top)) { |
| 1133 Address top = current_page->AllocationTop(); | 810 allocation_pointer_found_in_space = true; |
| 1134 if (current_page == top_page) { | 811 } |
| 1135 ASSERT(top == allocation_info_.top); | 812 ASSERT(page->WasSweptPrecisely()); |
| 1136 // The next page will be above the allocation top. | 813 HeapObjectIterator it(page, NULL); |
| 1137 above_allocation_top = true; | 814 Address end_of_previous_object = page->ObjectAreaStart(); |
| 815 Address top = page->ObjectAreaEnd(); |
| 816 int black_size = 0; |
| 817 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) { |
| 818 ASSERT(end_of_previous_object <= object->address()); |
| 819 |
| 820 // The first word should be a map, and we expect all map pointers to |
| 821 // be in map space. |
| 822 Map* map = object->map(); |
| 823 ASSERT(map->IsMap()); |
| 824 ASSERT(heap()->map_space()->Contains(map)); |
| 825 |
| 826 // Perform space-specific object verification. |
| 827 VerifyObject(object); |
| 828 |
| 829 // The object itself should look OK. |
| 830 object->Verify(); |
| 831 |
| 832 // All the interior pointers should be contained in the heap. |
| 833 int size = object->Size(); |
| 834 object->IterateBody(map->instance_type(), size, visitor); |
| 835 if (Marking::IsBlack(Marking::MarkBitFrom(object))) { |
| 836 black_size += size; |
| 1138 } | 837 } |
| 1139 | 838 |
| 1140 // It should be packed with objects from the bottom to the top. | 839 ASSERT(object->address() + size <= top); |
| 1141 Address current = current_page->ObjectAreaStart(); | 840 end_of_previous_object = object->address() + size; |
| 1142 while (current < top) { | |
| 1143 HeapObject* object = HeapObject::FromAddress(current); | |
| 1144 | |
| 1145 // The first word should be a map, and we expect all map pointers to | |
| 1146 // be in map space. | |
| 1147 Map* map = object->map(); | |
| 1148 ASSERT(map->IsMap()); | |
| 1149 ASSERT(heap()->map_space()->Contains(map)); | |
| 1150 | |
| 1151 // Perform space-specific object verification. | |
| 1152 VerifyObject(object); | |
| 1153 | |
| 1154 // The object itself should look OK. | |
| 1155 object->Verify(); | |
| 1156 | |
| 1157 // All the interior pointers should be contained in the heap, and | |
| 1158 // page regions covering intergenerational references should be | |
| 1159 // marked dirty. | |
| 1160 int size = object->Size(); | |
| 1161 object->IterateBody(map->instance_type(), size, visitor); | |
| 1162 | |
| 1163 current += size; | |
| 1164 } | |
| 1165 | |
| 1166 // The allocation pointer should not be in the middle of an object. | |
| 1167 ASSERT(current == top); | |
| 1168 } | 841 } |
| 1169 | 842 ASSERT_LE(black_size, page->LiveBytes()); |
| 1170 current_page = current_page->next_page(); | |
| 1171 } | 843 } |
| 844 ASSERT(allocation_pointer_found_in_space); |
| 1172 } | 845 } |
| 1173 #endif | 846 #endif |
| 1174 | 847 |
| 1175 | 848 |
| 1176 // ----------------------------------------------------------------------------- | 849 // ----------------------------------------------------------------------------- |
| 1177 // NewSpace implementation | 850 // NewSpace implementation |
| 1178 | 851 |
| 1179 | 852 |
| 1180 bool NewSpace::Setup(Address start, int size) { | 853 bool NewSpace::Setup(int reserved_semispace_capacity, |
| 854 int maximum_semispace_capacity) { |
| 1181 // Setup new space based on the preallocated memory block defined by | 855 // Setup new space based on the preallocated memory block defined by |
| 1182 // start and size. The provided space is divided into two semi-spaces. | 856 // start and size. The provided space is divided into two semi-spaces. |
| 1183 // To support fast containment testing in the new space, the size of | 857 // To support fast containment testing in the new space, the size of |
| 1184 // this chunk must be a power of two and it must be aligned to its size. | 858 // this chunk must be a power of two and it must be aligned to its size. |
| 1185 int initial_semispace_capacity = heap()->InitialSemiSpaceSize(); | 859 int initial_semispace_capacity = heap()->InitialSemiSpaceSize(); |
| 1186 int maximum_semispace_capacity = heap()->MaxSemiSpaceSize(); | 860 |
| 861 size_t size = 2 * reserved_semispace_capacity; |
| 862 Address base = |
| 863 heap()->isolate()->memory_allocator()->ReserveAlignedMemory( |
| 864 size, size, &reservation_); |
| 865 if (base == NULL) return false; |
| 866 |
| 867 chunk_base_ = base; |
| 868 chunk_size_ = static_cast<uintptr_t>(size); |
| 869 LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_)); |
| 1187 | 870 |
| 1188 ASSERT(initial_semispace_capacity <= maximum_semispace_capacity); | 871 ASSERT(initial_semispace_capacity <= maximum_semispace_capacity); |
| 1189 ASSERT(IsPowerOf2(maximum_semispace_capacity)); | 872 ASSERT(IsPowerOf2(maximum_semispace_capacity)); |
| 1190 | 873 |
| 1191 // Allocate and setup the histogram arrays if necessary. | 874 // Allocate and setup the histogram arrays if necessary. |
| 1192 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); | 875 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); |
| 1193 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); | 876 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); |
| 1194 | 877 |
| 1195 #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \ | 878 #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \ |
| 1196 promoted_histogram_[name].set_name(#name); | 879 promoted_histogram_[name].set_name(#name); |
| 1197 INSTANCE_TYPE_LIST(SET_NAME) | 880 INSTANCE_TYPE_LIST(SET_NAME) |
| 1198 #undef SET_NAME | 881 #undef SET_NAME |
| 1199 | 882 |
| 1200 ASSERT(size == 2 * heap()->ReservedSemiSpaceSize()); | 883 ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize()); |
| 1201 ASSERT(IsAddressAligned(start, size, 0)); | 884 ASSERT(static_cast<intptr_t>(chunk_size_) >= |
| 885 2 * heap()->ReservedSemiSpaceSize()); |
| 886 ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0)); |
| 1202 | 887 |
| 1203 if (!to_space_.Setup(start, | 888 if (!to_space_.Setup(chunk_base_, |
| 1204 initial_semispace_capacity, | 889 initial_semispace_capacity, |
| 1205 maximum_semispace_capacity)) { | 890 maximum_semispace_capacity)) { |
| 1206 return false; | 891 return false; |
| 1207 } | 892 } |
| 1208 if (!from_space_.Setup(start + maximum_semispace_capacity, | 893 if (!from_space_.Setup(chunk_base_ + reserved_semispace_capacity, |
| 1209 initial_semispace_capacity, | 894 initial_semispace_capacity, |
| 1210 maximum_semispace_capacity)) { | 895 maximum_semispace_capacity)) { |
| 1211 return false; | 896 return false; |
| 1212 } | 897 } |
| 1213 | 898 |
| 1214 start_ = start; | 899 start_ = chunk_base_; |
| 1215 address_mask_ = ~(size - 1); | 900 address_mask_ = ~(2 * reserved_semispace_capacity - 1); |
| 1216 object_mask_ = address_mask_ | kHeapObjectTagMask; | 901 object_mask_ = address_mask_ | kHeapObjectTagMask; |
| 1217 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag; | 902 object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag; |
| 1218 | 903 |
| 1219 allocation_info_.top = to_space_.low(); | 904 ResetAllocationInfo(); |
| 1220 allocation_info_.limit = to_space_.high(); | |
| 1221 mc_forwarding_info_.top = NULL; | |
| 1222 mc_forwarding_info_.limit = NULL; | |
| 1223 | 905 |
| 1224 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); | |
| 1225 return true; | 906 return true; |
| 1226 } | 907 } |
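The comment above explains why the new-space chunk must be a power of two in size and aligned to that size: containment testing then reduces to a single mask-and-compare, which is what the address_mask_/object_mask_ fields are for. A minimal sketch of that test, with hypothetical names:

    #include <cstdint>

    // With a 2^k-sized, 2^k-aligned chunk, "is addr inside the chunk?" needs no
    // range comparison: masking off the low bits must give back the chunk base.
    bool InChunk(uintptr_t addr, uintptr_t chunk_base, uintptr_t chunk_size) {
      uintptr_t mask = ~(chunk_size - 1);  // valid only if chunk_size is 2^k
      return (addr & mask) == chunk_base;  // true iff addr is in [base, base+size)
    }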
| 1227 | 908 |
| 1228 | 909 |
| 1229 void NewSpace::TearDown() { | 910 void NewSpace::TearDown() { |
| 1230 if (allocated_histogram_) { | 911 if (allocated_histogram_) { |
| 1231 DeleteArray(allocated_histogram_); | 912 DeleteArray(allocated_histogram_); |
| 1232 allocated_histogram_ = NULL; | 913 allocated_histogram_ = NULL; |
| 1233 } | 914 } |
| 1234 if (promoted_histogram_) { | 915 if (promoted_histogram_) { |
| 1235 DeleteArray(promoted_histogram_); | 916 DeleteArray(promoted_histogram_); |
| 1236 promoted_histogram_ = NULL; | 917 promoted_histogram_ = NULL; |
| 1237 } | 918 } |
| 1238 | 919 |
| 1239 start_ = NULL; | 920 start_ = NULL; |
| 1240 allocation_info_.top = NULL; | 921 allocation_info_.top = NULL; |
| 1241 allocation_info_.limit = NULL; | 922 allocation_info_.limit = NULL; |
| 1242 mc_forwarding_info_.top = NULL; | |
| 1243 mc_forwarding_info_.limit = NULL; | |
| 1244 | 923 |
| 1245 to_space_.TearDown(); | 924 to_space_.TearDown(); |
| 1246 from_space_.TearDown(); | 925 from_space_.TearDown(); |
| 926 |
| 927 LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_)); |
| 928 |
| 929 ASSERT(reservation_.IsReserved()); |
| 930 heap()->isolate()->memory_allocator()->FreeMemory(&reservation_, |
| 931 NOT_EXECUTABLE); |
| 932 chunk_base_ = NULL; |
| 933 chunk_size_ = 0; |
| 1247 } | 934 } |
| 1248 | 935 |
| 1249 | 936 |
| 1250 void NewSpace::Flip() { | 937 void NewSpace::Flip() { |
| 1251 SemiSpace tmp = from_space_; | 938 SemiSpace::Swap(&from_space_, &to_space_); |
| 1252 from_space_ = to_space_; | |
| 1253 to_space_ = tmp; | |
| 1254 } | 939 } |
| 1255 | 940 |
| 1256 | 941 |
| 1257 void NewSpace::Grow() { | 942 void NewSpace::Grow() { |
| 943 // Double the semispace size but only up to maximum capacity. |
| 1258 ASSERT(Capacity() < MaximumCapacity()); | 944 ASSERT(Capacity() < MaximumCapacity()); |
| 1259 if (to_space_.Grow()) { | 945 int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity())); |
| 1260 // Only grow from space if we managed to grow to space. | 946 if (to_space_.GrowTo(new_capacity)) { |
| 1261 if (!from_space_.Grow()) { | 947 // Only grow from space if we managed to grow to-space. |
| 1262 // If we managed to grow to space but couldn't grow from space, | 948 if (!from_space_.GrowTo(new_capacity)) { |
| 1263 // attempt to shrink to space. | 949 // If we managed to grow to-space but couldn't grow from-space, |
| 950 // attempt to shrink to-space. |
| 1264 if (!to_space_.ShrinkTo(from_space_.Capacity())) { | 951 if (!to_space_.ShrinkTo(from_space_.Capacity())) { |
| 1265 // We are in an inconsistent state because we could not | 952 // We are in an inconsistent state because we could not |
| 1266 // commit/uncommit memory from new space. | 953 // commit/uncommit memory from new space. |
| 1267 V8::FatalProcessOutOfMemory("Failed to grow new space."); | 954 V8::FatalProcessOutOfMemory("Failed to grow new space."); |
| 1268 } | 955 } |
| 1269 } | 956 } |
| 1270 } | 957 } |
| 1271 allocation_info_.limit = to_space_.high(); | |
| 1272 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); | 958 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
| 1273 } | 959 } |
| 1274 | 960 |
| 1275 | 961 |
| 1276 void NewSpace::Shrink() { | 962 void NewSpace::Shrink() { |
| 1277 int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt()); | 963 int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt()); |
| 1278 int rounded_new_capacity = | 964 int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize); |
| 1279 RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment())); | |
| 1280 if (rounded_new_capacity < Capacity() && | 965 if (rounded_new_capacity < Capacity() && |
| 1281 to_space_.ShrinkTo(rounded_new_capacity)) { | 966 to_space_.ShrinkTo(rounded_new_capacity)) { |
| 1282 // Only shrink from space if we managed to shrink to space. | 967 // Only shrink from-space if we managed to shrink to-space. |
| 968 from_space_.Reset(); |
| 1283 if (!from_space_.ShrinkTo(rounded_new_capacity)) { | 969 if (!from_space_.ShrinkTo(rounded_new_capacity)) { |
| 1284 // If we managed to shrink to space but couldn't shrink from | 970 // If we managed to shrink to-space but couldn't shrink from |
| 1285 // space, attempt to grow to space again. | 971 // space, attempt to grow to-space again. |
| 1286 if (!to_space_.GrowTo(from_space_.Capacity())) { | 972 if (!to_space_.GrowTo(from_space_.Capacity())) { |
| 1287 // We are in an inconsistent state because we could not | 973 // We are in an inconsistent state because we could not |
| 1288 // commit/uncommit memory from new space. | 974 // commit/uncommit memory from new space. |
| 1289 V8::FatalProcessOutOfMemory("Failed to shrink new space."); | 975 V8::FatalProcessOutOfMemory("Failed to shrink new space."); |
| 1290 } | 976 } |
| 1291 } | 977 } |
| 1292 } | 978 } |
| 1293 allocation_info_.limit = to_space_.high(); | 979 allocation_info_.limit = to_space_.page_high(); |
| 980 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
| 981 } |
| 982 |
| 983 |
| 984 void NewSpace::UpdateAllocationInfo() { |
| 985 allocation_info_.top = to_space_.page_low(); |
| 986 allocation_info_.limit = to_space_.page_high(); |
| 987 |
| 988 // Lower limit during incremental marking. |
| 989 if (heap()->incremental_marking()->IsMarking() && |
| 990 inline_allocation_limit_step() != 0) { |
| 991 Address new_limit = |
| 992 allocation_info_.top + inline_allocation_limit_step(); |
| 993 allocation_info_.limit = Min(new_limit, allocation_info_.limit); |
| 994 } |
| 1294 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); | 995 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
| 1295 } | 996 } |
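UpdateAllocationInfo() lowers the linear-allocation limit to top + inline_allocation_limit_step() while incremental marking is active, so bump allocation falls back to the slow path (and thus a marking step) at least once per step bytes. A sketch of that clamping, with hypothetical names:

    #include <algorithm>
    #include <cstdint>

    // Clamp the bump-allocation limit so at most `step` bytes can be allocated
    // before the slow path runs again; never extend past the real page limit.
    uintptr_t ClampLimit(uintptr_t top, uintptr_t page_limit, uintptr_t step) {
      if (step == 0) return page_limit;          // stepping disabled
      return std::min(top + step, page_limit);
    }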
| 1296 | 997 |
| 1297 | 998 |
| 1298 void NewSpace::ResetAllocationInfo() { | 999 void NewSpace::ResetAllocationInfo() { |
| 1299 allocation_info_.top = to_space_.low(); | 1000 to_space_.Reset(); |
| 1300 allocation_info_.limit = to_space_.high(); | 1001 UpdateAllocationInfo(); |
| 1301 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); | 1002 pages_used_ = 0; |
| 1003 // Clear all mark-bits in the to-space. |
| 1004 NewSpacePageIterator it(&to_space_); |
| 1005 while (it.has_next()) { |
| 1006 Bitmap::Clear(it.next()); |
| 1007 } |
| 1302 } | 1008 } |
| 1303 | 1009 |
| 1304 | 1010 |
| 1305 void NewSpace::MCResetRelocationInfo() { | 1011 bool NewSpace::AddFreshPage() { |
| 1306 mc_forwarding_info_.top = from_space_.low(); | 1012 Address top = allocation_info_.top; |
| 1307 mc_forwarding_info_.limit = from_space_.high(); | 1013 if (NewSpacePage::IsAtStart(top)) { |
| 1308 ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_); | 1014 // The current page is already empty. Don't try to make another. |
| 1309 } | |
| 1310 | 1015 |
| 1311 | 1016 // We should only get here if someone asks to allocate more |
| 1312 void NewSpace::MCCommitRelocationInfo() { | 1017 // than what can be stored in a single page. |
| 1313 // Assumes that the spaces have been flipped so that mc_forwarding_info_ is | 1018 // TODO(gc): Change the limit on new-space allocation to prevent this |
| 1314 // valid allocation info for the to space. | 1019 // from happening (all such allocations should go directly to LOSpace). |
| 1315 allocation_info_.top = mc_forwarding_info_.top; | 1020 return false; |
| 1316 allocation_info_.limit = to_space_.high(); | 1021 } |
| 1317 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); | 1022 if (!to_space_.AdvancePage()) { |
| 1023 // Failed to get a new page in to-space. |
| 1024 return false; |
| 1025 } |
| 1026 // Clear remainder of current page. |
| 1027 int remaining_in_page = |
| 1028 static_cast<int>(NewSpacePage::FromLimit(top)->body_limit() - top); |
| 1029 heap()->CreateFillerObjectAt(top, remaining_in_page); |
| 1030 pages_used_++; |
| 1031 UpdateAllocationInfo(); |
| 1032 return true; |
| 1318 } | 1033 } |
| 1319 | 1034 |
| 1320 | 1035 |
| 1321 #ifdef DEBUG | 1036 #ifdef DEBUG |
| 1322 // We do not use the SemispaceIterator because verification doesn't assume | 1037 // We do not use the SemiSpaceIterator because verification doesn't assume |
| 1323 // that it works (it depends on the invariants we are checking). | 1038 // that it works (it depends on the invariants we are checking). |
| 1324 void NewSpace::Verify() { | 1039 void NewSpace::Verify() { |
| 1325 // The allocation pointer should be in the space or at the very end. | 1040 // The allocation pointer should be in the space or at the very end. |
| 1326 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); | 1041 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
| 1327 | 1042 |
| 1328 // There should be objects packed in from the low address up to the | 1043 // There should be objects packed in from the low address up to the |
| 1329 // allocation pointer. | 1044 // allocation pointer. |
| 1330 Address current = to_space_.low(); | 1045 Address current = to_space_.first_page()->body(); |
| 1331 while (current < top()) { | 1046 CHECK_EQ(current, to_space_.space_start()); |
| 1332 HeapObject* object = HeapObject::FromAddress(current); | |
| 1333 | 1047 |
| 1334 // The first word should be a map, and we expect all map pointers to | 1048 while (current != top()) { |
| 1335 // be in map space. | 1049 if (!NewSpacePage::IsAtEnd(current)) { |
| 1336 Map* map = object->map(); | 1050 // The allocation pointer should not be in the middle of an object. |
| 1337 ASSERT(map->IsMap()); | 1051 CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) || |
| 1338 ASSERT(heap()->map_space()->Contains(map)); | 1052 current < top()); |
| 1339 | 1053 |
| 1340 // The object should not be code or a map. | 1054 HeapObject* object = HeapObject::FromAddress(current); |
| 1341 ASSERT(!object->IsMap()); | |
| 1342 ASSERT(!object->IsCode()); | |
| 1343 | 1055 |
| 1344 // The object itself should look OK. | 1056 // The first word should be a map, and we expect all map pointers to |
| 1345 object->Verify(); | 1057 // be in map space. |
| 1058 Map* map = object->map(); |
| 1059 CHECK(map->IsMap()); |
| 1060 CHECK(heap()->map_space()->Contains(map)); |
| 1346 | 1061 |
| 1347 // All the interior pointers should be contained in the heap. | 1062 // The object should not be code or a map. |
| 1348 VerifyPointersVisitor visitor; | 1063 CHECK(!object->IsMap()); |
| 1349 int size = object->Size(); | 1064 CHECK(!object->IsCode()); |
| 1350 object->IterateBody(map->instance_type(), size, &visitor); | |
| 1351 | 1065 |
| 1352 current += size; | 1066 // The object itself should look OK. |
| 1067 object->Verify(); |
| 1068 |
| 1069 // All the interior pointers should be contained in the heap. |
| 1070 VerifyPointersVisitor visitor; |
| 1071 int size = object->Size(); |
| 1072 object->IterateBody(map->instance_type(), size, &visitor); |
| 1073 |
| 1074 current += size; |
| 1075 } else { |
| 1076 // At end of page, switch to next page. |
| 1077 NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page(); |
| 1078 // Next page should be valid. |
| 1079 CHECK(!page->is_anchor()); |
| 1080 current = page->body(); |
| 1081 } |
| 1353 } | 1082 } |
| 1354 | 1083 |
| 1355 // The allocation pointer should not be in the middle of an object. | 1084 // Check semi-spaces. |
| 1356 ASSERT(current == top()); | 1085 ASSERT_EQ(from_space_.id(), kFromSpace); |
| 1086 ASSERT_EQ(to_space_.id(), kToSpace); |
| 1087 from_space_.Verify(); |
| 1088 to_space_.Verify(); |
| 1357 } | 1089 } |
| 1358 #endif | 1090 #endif |
| 1359 | 1091 |
| 1360 | |
| 1361 bool SemiSpace::Commit() { | |
| 1362 ASSERT(!is_committed()); | |
| 1363 if (!heap()->isolate()->memory_allocator()->CommitBlock( | |
| 1364 start_, capacity_, executable())) { | |
| 1365 return false; | |
| 1366 } | |
| 1367 committed_ = true; | |
| 1368 return true; | |
| 1369 } | |
| 1370 | |
| 1371 | |
| 1372 bool SemiSpace::Uncommit() { | |
| 1373 ASSERT(is_committed()); | |
| 1374 if (!heap()->isolate()->memory_allocator()->UncommitBlock( | |
| 1375 start_, capacity_)) { | |
| 1376 return false; | |
| 1377 } | |
| 1378 committed_ = false; | |
| 1379 return true; | |
| 1380 } | |
| 1381 | |
| 1382 | |
| 1383 // ----------------------------------------------------------------------------- | 1092 // ----------------------------------------------------------------------------- |
| 1384 // SemiSpace implementation | 1093 // SemiSpace implementation |
| 1385 | 1094 |
| 1386 bool SemiSpace::Setup(Address start, | 1095 bool SemiSpace::Setup(Address start, |
| 1387 int initial_capacity, | 1096 int initial_capacity, |
| 1388 int maximum_capacity) { | 1097 int maximum_capacity) { |
| 1389 // Creates a space in the young generation. The constructor does not | 1098 // Creates a space in the young generation. The constructor does not |
| 1390 // allocate memory from the OS. A SemiSpace is given a contiguous chunk of | 1099 // allocate memory from the OS. A SemiSpace is given a contiguous chunk of |
| 1391 // memory of size 'capacity' when set up, and does not grow or shrink | 1100 // memory of size 'capacity' when set up, and does not grow or shrink |
| 1392 // otherwise. In the mark-compact collector, the memory region of the from | 1101 // otherwise. In the mark-compact collector, the memory region of the from |
| 1393 // space is used as the marking stack. It requires contiguous memory | 1102 // space is used as the marking stack. It requires contiguous memory |
| 1394 // addresses. | 1103 // addresses. |
| 1395 initial_capacity_ = initial_capacity; | 1104 ASSERT(maximum_capacity >= Page::kPageSize); |
| 1105 initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize); |
| 1396 capacity_ = initial_capacity; | 1106 capacity_ = initial_capacity; |
| 1397 maximum_capacity_ = maximum_capacity; | 1107 maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize); |
| 1398 committed_ = false; | 1108 committed_ = false; |
| 1399 | |
| 1400 start_ = start; | 1109 start_ = start; |
| 1401 address_mask_ = ~(maximum_capacity - 1); | 1110 address_mask_ = ~(maximum_capacity - 1); |
| 1402 object_mask_ = address_mask_ | kHeapObjectTagMask; | 1111 object_mask_ = address_mask_ | kHeapObjectTagMask; |
| 1403 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag; | 1112 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag; |
| 1404 age_mark_ = start_; | 1113 age_mark_ = start_; |
| 1405 | 1114 |
| 1406 return Commit(); | 1115 return Commit(); |
| 1407 } | 1116 } |
| 1408 | 1117 |
| 1409 | 1118 |
| 1410 void SemiSpace::TearDown() { | 1119 void SemiSpace::TearDown() { |
| 1411 start_ = NULL; | 1120 start_ = NULL; |
| 1412 capacity_ = 0; | 1121 capacity_ = 0; |
| 1413 } | 1122 } |
| 1414 | 1123 |
| 1415 | 1124 |
| 1416 bool SemiSpace::Grow() { | 1125 bool SemiSpace::Commit() { |
| 1417 // Double the semispace size but only up to maximum capacity. | 1126 ASSERT(!is_committed()); |
| 1418 int maximum_extra = maximum_capacity_ - capacity_; | 1127 int pages = capacity_ / Page::kPageSize; |
| 1419 int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())), | 1128 Address end = start_ + maximum_capacity_; |
| 1420 maximum_extra); | 1129 Address start = end - pages * Page::kPageSize; |
| 1421 if (!heap()->isolate()->memory_allocator()->CommitBlock( | 1130 if (!heap()->isolate()->memory_allocator()->CommitBlock(start, |
| 1422 high(), extra, executable())) { | 1131 capacity_, |
| 1423 return false; | 1132 executable())) { |
| 1424 } | 1133 return false; |
| 1425 capacity_ += extra; | 1134 } |
| 1135 |
| 1136 NewSpacePage* page = anchor(); |
| 1137 for (int i = 1; i <= pages; i++) { |
| 1138 NewSpacePage* new_page = |
| 1139 NewSpacePage::Initialize(heap(), end - i * Page::kPageSize, this); |
| 1140 new_page->InsertAfter(page); |
| 1141 page = new_page; |
| 1142 } |
| 1143 |
| 1144 committed_ = true; |
| 1145 Reset(); |
| 1146 return true; |
| 1147 } |
| 1148 |
| 1149 |
| 1150 bool SemiSpace::Uncommit() { |
| 1151 ASSERT(is_committed()); |
| 1152 Address start = start_ + maximum_capacity_ - capacity_; |
| 1153 if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) { |
| 1154 return false; |
| 1155 } |
| 1156 anchor()->set_next_page(anchor()); |
| 1157 anchor()->set_prev_page(anchor()); |
| 1158 |
| 1159 committed_ = false; |
| 1426 return true; | 1160 return true; |
| 1427 } | 1161 } |
| 1428 | 1162 |
| 1429 | 1163 |
| 1430 bool SemiSpace::GrowTo(int new_capacity) { | 1164 bool SemiSpace::GrowTo(int new_capacity) { |
| 1165 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0); |
| 1431 ASSERT(new_capacity <= maximum_capacity_); | 1166 ASSERT(new_capacity <= maximum_capacity_); |
| 1432 ASSERT(new_capacity > capacity_); | 1167 ASSERT(new_capacity > capacity_); |
| 1168 int pages_before = capacity_ / Page::kPageSize; |
| 1169 int pages_after = new_capacity / Page::kPageSize; |
| 1170 |
| 1171 Address end = start_ + maximum_capacity_; |
| 1172 Address start = end - new_capacity; |
| 1433 size_t delta = new_capacity - capacity_; | 1173 size_t delta = new_capacity - capacity_; |
| 1174 |
| 1434 ASSERT(IsAligned(delta, OS::AllocateAlignment())); | 1175 ASSERT(IsAligned(delta, OS::AllocateAlignment())); |
| 1435 if (!heap()->isolate()->memory_allocator()->CommitBlock( | 1176 if (!heap()->isolate()->memory_allocator()->CommitBlock( |
| 1436 high(), delta, executable())) { | 1177 start, delta, executable())) { |
| 1437 return false; | 1178 return false; |
| 1438 } | 1179 } |
| 1439 capacity_ = new_capacity; | 1180 capacity_ = new_capacity; |
| 1181 NewSpacePage* last_page = anchor()->prev_page(); |
| 1182 ASSERT(last_page != anchor()); |
| 1183 for (int i = pages_before + 1; i <= pages_after; i++) { |
| 1184 Address page_address = end - i * Page::kPageSize; |
| 1185 NewSpacePage* new_page = NewSpacePage::Initialize(heap(), |
| 1186 page_address, |
| 1187 this); |
| 1188 new_page->InsertAfter(last_page); |
| 1189 Bitmap::Clear(new_page); |
| 1190 // Duplicate the flags that were set on the old page. |
| 1191 new_page->SetFlags(last_page->GetFlags(), |
| 1192 NewSpacePage::kCopyOnFlipFlagsMask); |
| 1193 last_page = new_page; |
| 1194 } |
| 1440 return true; | 1195 return true; |
| 1441 } | 1196 } |
| 1442 | 1197 |
| 1443 | 1198 |
| 1444 bool SemiSpace::ShrinkTo(int new_capacity) { | 1199 bool SemiSpace::ShrinkTo(int new_capacity) { |
| 1200 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0); |
| 1445 ASSERT(new_capacity >= initial_capacity_); | 1201 ASSERT(new_capacity >= initial_capacity_); |
| 1446 ASSERT(new_capacity < capacity_); | 1202 ASSERT(new_capacity < capacity_); |
| 1203 // Semispaces grow backwards from the end of their allocated capacity, |
| 1204 // so we find the before and after start addresses relative to the |
| 1205 // end of the space. |
| 1206 Address space_end = start_ + maximum_capacity_; |
| 1207 Address old_start = space_end - capacity_; |
| 1447 size_t delta = capacity_ - new_capacity; | 1208 size_t delta = capacity_ - new_capacity; |
| 1448 ASSERT(IsAligned(delta, OS::AllocateAlignment())); | 1209 ASSERT(IsAligned(delta, OS::AllocateAlignment())); |
| 1449 if (!heap()->isolate()->memory_allocator()->UncommitBlock( | 1210 if (!heap()->isolate()->memory_allocator()->UncommitBlock(old_start, delta)) { |
| 1450 high() - delta, delta)) { | |
| 1451 return false; | 1211 return false; |
| 1452 } | 1212 } |
| 1453 capacity_ = new_capacity; | 1213 capacity_ = new_capacity; |
| 1454 return true; | 1214 |
| 1215 int pages_after = capacity_ / Page::kPageSize; |
| 1216 NewSpacePage* new_last_page = |
| 1217 NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize); |
| 1218 new_last_page->set_next_page(anchor()); |
| 1219 anchor()->set_prev_page(new_last_page); |
| 1220 ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page)); |
| 1221 |
| 1222 return true; |
| 1223 } |
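As the comment in ShrinkTo() says, a semispace commits memory backwards from the high end of its reservation, so the committed region is always [start_ + maximum_capacity_ - capacity_, start_ + maximum_capacity_). A worked sketch of that arithmetic with made-up numbers:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t start = 0x10000000;          // hypothetical reservation base
      const uintptr_t maximum_capacity = 8 << 20;  // 8 MB reserved
      uintptr_t capacity = 2 << 20;                // 2 MB currently committed

      uintptr_t space_end = start + maximum_capacity;
      uintptr_t committed_start = space_end - capacity;  // committed region grows down

      // Shrinking to 1 MB uncommits `delta` bytes at the low end of the
      // committed region, leaving [space_end - new_capacity, space_end).
      uintptr_t new_capacity = 1 << 20;
      uintptr_t delta = capacity - new_capacity;
      assert(committed_start + delta == space_end - new_capacity);
      return 0;
    }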
| 1224 |
| 1225 |
| 1226 void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) { |
| 1227 anchor_.set_owner(this); |
| 1228 // Fixup back-pointers to anchor. Address of anchor changes |
| 1229 // when we swap. |
| 1230 anchor_.prev_page()->set_next_page(&anchor_); |
| 1231 anchor_.next_page()->set_prev_page(&anchor_); |
| 1232 |
| 1233 bool becomes_to_space = (id_ == kFromSpace); |
| 1234 id_ = becomes_to_space ? kToSpace : kFromSpace; |
| 1235 NewSpacePage* page = anchor_.next_page(); |
| 1236 while (page != &anchor_) { |
| 1237 page->set_owner(this); |
| 1238 page->SetFlags(flags, mask); |
| 1239 if (becomes_to_space) { |
| 1240 page->ClearFlag(MemoryChunk::IN_FROM_SPACE); |
| 1241 page->SetFlag(MemoryChunk::IN_TO_SPACE); |
| 1242 page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK); |
| 1243 page->ResetLiveBytes(); |
| 1244 } else { |
| 1245 page->SetFlag(MemoryChunk::IN_FROM_SPACE); |
| 1246 page->ClearFlag(MemoryChunk::IN_TO_SPACE); |
| 1247 } |
| 1248 ASSERT(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)); |
| 1249 ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) || |
| 1250 page->IsFlagSet(MemoryChunk::IN_FROM_SPACE)); |
| 1251 page = page->next_page(); |
| 1252 } |
| 1253 } |
| 1254 |
| 1255 |
| 1256 void SemiSpace::Reset() { |
| 1257 ASSERT(anchor_.next_page() != &anchor_); |
| 1258 current_page_ = anchor_.next_page(); |
| 1259 } |
| 1260 |
| 1261 |
| 1262 void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) { |
| 1263 // We won't be swapping semispaces without data in them. |
| 1264 ASSERT(from->anchor_.next_page() != &from->anchor_); |
| 1265 ASSERT(to->anchor_.next_page() != &to->anchor_); |
| 1266 |
| 1267 // Swap bits. |
| 1268 SemiSpace tmp = *from; |
| 1269 *from = *to; |
| 1270 *to = tmp; |
| 1271 |
| 1272 // Fixup back-pointers to the page list anchor now that its address |
| 1273 // has changed. |
| 1274 // Swap to/from-space bits on pages. |
| 1275 // Copy GC flags from old active space (from-space) to new (to-space). |
| 1276 intptr_t flags = from->current_page()->GetFlags(); |
| 1277 to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask); |
| 1278 |
| 1279 from->FlipPages(0, 0); |
| 1280 } |
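Swap() exchanges the two SemiSpace objects by value, which moves each embedded anchor page to a new address; the FlipPages() calls then rewrite the neighbouring pages' links so they point at the anchor again. A toy illustration of why that fix-up is required (hypothetical Node type):

    // A list head embedded by value in its owner: if the owner is copied or
    // swapped, the head's address changes and the old neighbours dangle until
    // they are re-pointed at it, which is what the code above does.
    struct Node { Node* prev; Node* next; };

    void FixupAnchor(Node* anchor) {
      anchor->prev->next = anchor;  // re-link the tail to the anchor's new address
      anchor->next->prev = anchor;  // re-link the head likewise
    }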
| 1281 |
| 1282 |
| 1283 void SemiSpace::set_age_mark(Address mark) { |
| 1284 ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this); |
| 1285 age_mark_ = mark; |
| 1286 // Mark all pages up to the one containing mark. |
| 1287 NewSpacePageIterator it(space_start(), mark); |
| 1288 while (it.has_next()) { |
| 1289 it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK); |
| 1290 } |
| 1455 } | 1291 } |
| 1456 | 1292 |
| 1457 | 1293 |
| 1458 #ifdef DEBUG | 1294 #ifdef DEBUG |
| 1459 void SemiSpace::Print() { } | 1295 void SemiSpace::Print() { } |
| 1460 | 1296 |
| 1461 | 1297 |
| 1462 void SemiSpace::Verify() { } | 1298 void SemiSpace::Verify() { |
| 1299 bool is_from_space = (id_ == kFromSpace); |
| 1300 NewSpacePage* page = anchor_.next_page(); |
| 1301 CHECK(anchor_.semi_space() == this); |
| 1302 while (page != &anchor_) { |
| 1303 CHECK(page->semi_space() == this); |
| 1304 CHECK(page->InNewSpace()); |
| 1305 CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE |
| 1306 : MemoryChunk::IN_TO_SPACE)); |
| 1307 CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE |
| 1308 : MemoryChunk::IN_FROM_SPACE)); |
| 1309 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING)); |
| 1310 if (!is_from_space) { |
| 1311 // The pointers-from-here-are-interesting flag isn't updated dynamically |
| 1312 // on from-space pages, so it might be out of sync with the marking state. |
| 1313 if (page->heap()->incremental_marking()->IsMarking()) { |
| 1314 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING)); |
| 1315 } else { |
| 1316 CHECK(!page->IsFlagSet( |
| 1317 MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING)); |
| 1318 } |
| 1319 // TODO(gc): Check that the live_bytes_count_ field matches the |
| 1320 // black marking on the page (if we make it match in new-space). |
| 1321 } |
| 1322 CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)); |
| 1323 CHECK(page->prev_page()->next_page() == page); |
| 1324 page = page->next_page(); |
| 1325 } |
| 1326 } |
| 1327 |
| 1328 |
| 1329 void SemiSpace::AssertValidRange(Address start, Address end) { |
| 1330 // Addresses belong to same semi-space |
| 1331 NewSpacePage* page = NewSpacePage::FromLimit(start); |
| 1332 NewSpacePage* end_page = NewSpacePage::FromLimit(end); |
| 1333 SemiSpace* space = page->semi_space(); |
| 1334 CHECK_EQ(space, end_page->semi_space()); |
| 1335 // The start address precedes the end address: either both are on the |
| 1336 // same page, or the end address is on a later page in the linked list |
| 1337 // of semi-space pages. |
| 1338 if (page == end_page) { |
| 1339 CHECK(start <= end); |
| 1340 } else { |
| 1341 while (page != end_page) { |
| 1342 page = page->next_page(); |
| 1343 CHECK_NE(page, space->anchor()); |
| 1344 } |
| 1345 } |
| 1346 } |
| 1463 #endif | 1347 #endif |
| 1464 | 1348 |
| 1465 | 1349 |
| 1466 // ----------------------------------------------------------------------------- | 1350 // ----------------------------------------------------------------------------- |
| 1467 // SemiSpaceIterator implementation. | 1351 // SemiSpaceIterator implementation. |
| 1468 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) { | 1352 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) { |
| 1469 Initialize(space, space->bottom(), space->top(), NULL); | 1353 Initialize(space->bottom(), space->top(), NULL); |
| 1470 } | 1354 } |
| 1471 | 1355 |
| 1472 | 1356 |
| 1473 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, | 1357 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, |
| 1474 HeapObjectCallback size_func) { | 1358 HeapObjectCallback size_func) { |
| 1475 Initialize(space, space->bottom(), space->top(), size_func); | 1359 Initialize(space->bottom(), space->top(), size_func); |
| 1476 } | 1360 } |
| 1477 | 1361 |
| 1478 | 1362 |
| 1479 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) { | 1363 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) { |
| 1480 Initialize(space, start, space->top(), NULL); | 1364 Initialize(start, space->top(), NULL); |
| 1481 } | 1365 } |
| 1482 | 1366 |
| 1483 | 1367 |
| 1484 void SemiSpaceIterator::Initialize(NewSpace* space, Address start, | 1368 SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) { |
| 1369 Initialize(from, to, NULL); |
| 1370 } |
| 1371 |
| 1372 |
| 1373 void SemiSpaceIterator::Initialize(Address start, |
| 1485 Address end, | 1374 Address end, |
| 1486 HeapObjectCallback size_func) { | 1375 HeapObjectCallback size_func) { |
| 1487 ASSERT(space->ToSpaceContains(start)); | 1376 SemiSpace::AssertValidRange(start, end); |
| 1488 ASSERT(space->ToSpaceLow() <= end | |
| 1489 && end <= space->ToSpaceHigh()); | |
| 1490 space_ = &space->to_space_; | |
| 1491 current_ = start; | 1377 current_ = start; |
| 1492 limit_ = end; | 1378 limit_ = end; |
| 1493 size_func_ = size_func; | 1379 size_func_ = size_func; |
| 1494 } | 1380 } |
| 1495 | 1381 |
| 1496 | 1382 |
| 1497 #ifdef DEBUG | 1383 #ifdef DEBUG |
| 1498 // heap_histograms is shared, always clear it before using it. | 1384 // heap_histograms is shared, always clear it before using it. |
| 1499 static void ClearHistograms() { | 1385 static void ClearHistograms() { |
| 1500 Isolate* isolate = Isolate::Current(); | 1386 Isolate* isolate = Isolate::Current(); |
| (...skipping 115 matching lines...) |
| 1616 promoted_histogram_[i].clear(); | 1502 promoted_histogram_[i].clear(); |
| 1617 } | 1503 } |
| 1618 } | 1504 } |
| 1619 | 1505 |
| 1620 // Because the copying collector does not touch garbage objects, we iterate | 1506 // Because the copying collector does not touch garbage objects, we iterate |
| 1621 // the new space before a collection to get a histogram of allocated objects. | 1507 // the new space before a collection to get a histogram of allocated objects. |
| 1622 // This only happens when --log-gc flag is set. | 1508 // This only happens when --log-gc flag is set. |
| 1623 void NewSpace::CollectStatistics() { | 1509 void NewSpace::CollectStatistics() { |
| 1624 ClearHistograms(); | 1510 ClearHistograms(); |
| 1625 SemiSpaceIterator it(this); | 1511 SemiSpaceIterator it(this); |
| 1626 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) | 1512 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) |
| 1627 RecordAllocation(obj); | 1513 RecordAllocation(obj); |
| 1628 } | 1514 } |
| 1629 | 1515 |
| 1630 | 1516 |
| 1631 static void DoReportStatistics(Isolate* isolate, | 1517 static void DoReportStatistics(Isolate* isolate, |
| 1632 HistogramInfo* info, const char* description) { | 1518 HistogramInfo* info, const char* description) { |
| 1633 LOG(isolate, HeapSampleBeginEvent("NewSpace", description)); | 1519 LOG(isolate, HeapSampleBeginEvent("NewSpace", description)); |
| 1634 // Lump all the string types together. | 1520 // Lump all the string types together. |
| 1635 int string_number = 0; | 1521 int string_number = 0; |
| 1636 int string_bytes = 0; | 1522 int string_bytes = 0; |
| (...skipping 55 matching lines...) |
| 1692 } | 1578 } |
| 1693 | 1579 |
| 1694 | 1580 |
| 1695 void NewSpace::RecordPromotion(HeapObject* obj) { | 1581 void NewSpace::RecordPromotion(HeapObject* obj) { |
| 1696 InstanceType type = obj->map()->instance_type(); | 1582 InstanceType type = obj->map()->instance_type(); |
| 1697 ASSERT(0 <= type && type <= LAST_TYPE); | 1583 ASSERT(0 <= type && type <= LAST_TYPE); |
| 1698 promoted_histogram_[type].increment_number(1); | 1584 promoted_histogram_[type].increment_number(1); |
| 1699 promoted_histogram_[type].increment_bytes(obj->Size()); | 1585 promoted_histogram_[type].increment_bytes(obj->Size()); |
| 1700 } | 1586 } |
| 1701 | 1587 |
| 1702 | |
| 1703 // ----------------------------------------------------------------------------- | 1588 // ----------------------------------------------------------------------------- |
| 1704 // Free lists for old object spaces implementation | 1589 // Free lists for old object spaces implementation |
| 1705 | 1590 |
| 1706 void FreeListNode::set_size(Heap* heap, int size_in_bytes) { | 1591 void FreeListNode::set_size(Heap* heap, int size_in_bytes) { |
| 1707 ASSERT(size_in_bytes > 0); | 1592 ASSERT(size_in_bytes > 0); |
| 1708 ASSERT(IsAligned(size_in_bytes, kPointerSize)); | 1593 ASSERT(IsAligned(size_in_bytes, kPointerSize)); |
| 1709 | 1594 |
| 1710 // We write a map and possibly size information to the block. If the block | 1595 // We write a map and possibly size information to the block. If the block |
| 1711 // is big enough to be a ByteArray with at least one extra word (the next | 1596 // is big enough to be a FreeSpace with at least one extra word (the next |
| 1712 // pointer), we set its map to be the byte array map and its size to an | 1597 // pointer), we set its map to be the free space map and its size to an |
| 1713 // appropriate array length for the desired size from HeapObject::Size(). | 1598 // appropriate array length for the desired size from HeapObject::Size(). |
| 1714 // If the block is too small (e.g., one or two words) to hold both a size | 1599 // If the block is too small (e.g., one or two words) to hold both a size |
| 1715 // field and a next pointer, we give it a filler map that gives it the | 1600 // field and a next pointer, we give it a filler map that gives it the |
| 1716 // correct size. | 1601 // correct size. |
| 1717 if (size_in_bytes > ByteArray::kHeaderSize) { | 1602 if (size_in_bytes > FreeSpace::kHeaderSize) { |
| 1718 set_map(heap->raw_unchecked_byte_array_map()); | 1603 set_map(heap->raw_unchecked_free_space_map()); |
| 1719 // Can't use ByteArray::cast because it fails during deserialization. | 1604 // Can't use FreeSpace::cast because it fails during deserialization. |
| 1720 ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this); | 1605 FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this); |
| 1721 this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes)); | 1606 this_as_free_space->set_size(size_in_bytes); |
| 1722 } else if (size_in_bytes == kPointerSize) { | 1607 } else if (size_in_bytes == kPointerSize) { |
| 1723 set_map(heap->raw_unchecked_one_pointer_filler_map()); | 1608 set_map(heap->raw_unchecked_one_pointer_filler_map()); |
| 1724 } else if (size_in_bytes == 2 * kPointerSize) { | 1609 } else if (size_in_bytes == 2 * kPointerSize) { |
| 1725 set_map(heap->raw_unchecked_two_pointer_filler_map()); | 1610 set_map(heap->raw_unchecked_two_pointer_filler_map()); |
| 1726 } else { | 1611 } else { |
| 1727 UNREACHABLE(); | 1612 UNREACHABLE(); |
| 1728 } | 1613 } |
| 1729 // We would like to ASSERT(Size() == size_in_bytes) but this would fail during | 1614 // We would like to ASSERT(Size() == size_in_bytes) but this would fail during |
| 1730 // deserialization because the byte array map is not done yet. | 1615 // deserialization because the free space map is not done yet. |
| 1731 } | 1616 } |
| 1732 | 1617 |
| 1733 | 1618 |
| 1734 Address FreeListNode::next(Heap* heap) { | 1619 FreeListNode* FreeListNode::next() { |
| 1735 ASSERT(IsFreeListNode(this)); | 1620 ASSERT(IsFreeListNode(this)); |
| 1736 if (map() == heap->raw_unchecked_byte_array_map()) { | 1621 if (map() == HEAP->raw_unchecked_free_space_map()) { |
| 1622 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize); |
| 1623 return reinterpret_cast<FreeListNode*>( |
| 1624 Memory::Address_at(address() + kNextOffset)); |
| 1625 } else { |
| 1626 return reinterpret_cast<FreeListNode*>( |
| 1627 Memory::Address_at(address() + kPointerSize)); |
| 1628 } |
| 1629 } |
| 1630 |
| 1631 |
| 1632 FreeListNode** FreeListNode::next_address() { |
| 1633 ASSERT(IsFreeListNode(this)); |
| 1634 if (map() == HEAP->raw_unchecked_free_space_map()) { |
| 1737 ASSERT(Size() >= kNextOffset + kPointerSize); | 1635 ASSERT(Size() >= kNextOffset + kPointerSize); |
| 1738 return Memory::Address_at(address() + kNextOffset); | 1636 return reinterpret_cast<FreeListNode**>(address() + kNextOffset); |
| 1739 } else { | 1637 } else { |
| 1740 return Memory::Address_at(address() + kPointerSize); | 1638 return reinterpret_cast<FreeListNode**>(address() + kPointerSize); |
| 1741 } | 1639 } |
| 1742 } | 1640 } |
| 1743 | 1641 |
| 1744 | 1642 |
| 1745 void FreeListNode::set_next(Heap* heap, Address next) { | 1643 void FreeListNode::set_next(FreeListNode* next) { |
| 1746 ASSERT(IsFreeListNode(this)); | 1644 ASSERT(IsFreeListNode(this)); |
| 1747 if (map() == heap->raw_unchecked_byte_array_map()) { | 1645 // While we are booting the VM the free space map will actually be null. So |
| 1748 ASSERT(Size() >= kNextOffset + kPointerSize); | 1646 // we have to make sure that we don't try to use it for anything at that |
| 1749 Memory::Address_at(address() + kNextOffset) = next; | 1647 // stage. |
| 1750 } else { | 1648 if (map() == HEAP->raw_unchecked_free_space_map()) { |
| 1751 Memory::Address_at(address() + kPointerSize) = next; | 1649 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize); |
| 1752 } | 1650 Memory::Address_at(address() + kNextOffset) = |
| 1753 } | 1651 reinterpret_cast<Address>(next); |
| 1754 | 1652 } else { |
| 1755 | 1653 Memory::Address_at(address() + kPointerSize) = |
| 1756 OldSpaceFreeList::OldSpaceFreeList(Heap* heap, AllocationSpace owner) | 1654 reinterpret_cast<Address>(next); |
| 1757 : heap_(heap), | 1655 } |
| 1758 owner_(owner) { | 1656 } |
| 1657 |
| 1658 |
| 1659 FreeList::FreeList(PagedSpace* owner) |
| 1660 : owner_(owner), heap_(owner->heap()) { |
| 1759 Reset(); | 1661 Reset(); |
| 1760 } | 1662 } |
| 1761 | 1663 |
| 1762 | 1664 |
| 1763 void OldSpaceFreeList::Reset() { | 1665 void FreeList::Reset() { |
| 1764 available_ = 0; | 1666 available_ = 0; |
| 1765 for (int i = 0; i < kFreeListsLength; i++) { | 1667 small_list_ = NULL; |
| 1766 free_[i].head_node_ = NULL; | 1668 medium_list_ = NULL; |
| 1767 } | 1669 large_list_ = NULL; |
| 1768 needs_rebuild_ = false; | 1670 huge_list_ = NULL; |
| 1769 finger_ = kHead; | 1671 } |
| 1770 free_[kHead].next_size_ = kEnd; | 1672 |
| 1771 } | 1673 |
| 1772 | 1674 int FreeList::Free(Address start, int size_in_bytes) { |
| 1773 | 1675 if (size_in_bytes == 0) return 0; |
| 1774 void OldSpaceFreeList::RebuildSizeList() { | |
| 1775 ASSERT(needs_rebuild_); | |
| 1776 int cur = kHead; | |
| 1777 for (int i = cur + 1; i < kFreeListsLength; i++) { | |
| 1778 if (free_[i].head_node_ != NULL) { | |
| 1779 free_[cur].next_size_ = i; | |
| 1780 cur = i; | |
| 1781 } | |
| 1782 } | |
| 1783 free_[cur].next_size_ = kEnd; | |
| 1784 needs_rebuild_ = false; | |
| 1785 } | |
| 1786 | |
| 1787 | |
| 1788 int OldSpaceFreeList::Free(Address start, int size_in_bytes) { | |
| 1789 #ifdef DEBUG | |
| 1790 Isolate::Current()->memory_allocator()->ZapBlock(start, size_in_bytes); | |
| 1791 #endif | |
| 1792 FreeListNode* node = FreeListNode::FromAddress(start); | 1676 FreeListNode* node = FreeListNode::FromAddress(start); |
| 1793 node->set_size(heap_, size_in_bytes); | 1677 node->set_size(heap_, size_in_bytes); |
| 1794 | 1678 |
| 1795 // We don't use the freelists in compacting mode. This makes it more like a | 1679 // Early return to drop too-small blocks on the floor. |
| 1796 // GC that only has mark-sweep-compact and doesn't have a mark-sweep | 1680 if (size_in_bytes < kSmallListMin) return size_in_bytes; |
| 1797 // collector. | 1681 |
| 1798 if (FLAG_always_compact) { | 1682 // Insert other blocks at the head of a free list of the appropriate |
| 1799 return size_in_bytes; | 1683 // magnitude. |
| 1800 } | 1684 if (size_in_bytes <= kSmallListMax) { |
| 1801 | 1685 node->set_next(small_list_); |
| 1802 // Early return to drop too-small blocks on the floor (one or two word | 1686 small_list_ = node; |
| 1803 // blocks cannot hold a map pointer, a size field, and a pointer to the | 1687 } else if (size_in_bytes <= kMediumListMax) { |
| 1804 // next block in the free list). | 1688 node->set_next(medium_list_); |
| 1805 if (size_in_bytes < kMinBlockSize) { | 1689 medium_list_ = node; |
| 1806 return size_in_bytes; | 1690 } else if (size_in_bytes <= kLargeListMax) { |
| 1807 } | 1691 node->set_next(large_list_); |
| 1808 | 1692 large_list_ = node; |
| 1809 // Insert other blocks at the head of an exact free list. | 1693 } else { |
| 1810 int index = size_in_bytes >> kPointerSizeLog2; | 1694 node->set_next(huge_list_); |
| 1811 node->set_next(heap_, free_[index].head_node_); | 1695 huge_list_ = node; |
| 1812 free_[index].head_node_ = node->address(); | 1696 } |
| 1813 available_ += size_in_bytes; | 1697 available_ += size_in_bytes; |
| 1814 needs_rebuild_ = true; | 1698 ASSERT(IsVeryLong() || available_ == SumFreeLists()); |
| 1815 return 0; | 1699 return 0; |
| 1816 } | 1700 } |
| 1817 | 1701 |
| 1818 | 1702 |
| 1819 MaybeObject* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) { | 1703 FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) { |
| 1704 FreeListNode* node = *list; |
| 1705 |
| 1706 if (node == NULL) return NULL; |
| 1707 |
| 1708 while (node != NULL && |
| 1709 Page::FromAddress(node->address())->IsEvacuationCandidate()) { |
| 1710 available_ -= node->Size(); |
| 1711 node = node->next(); |
| 1712 } |
| 1713 |
| 1714 if (node != NULL) { |
| 1715 *node_size = node->Size(); |
| 1716 *list = node->next(); |
| 1717 } else { |
| 1718 *list = NULL; |
| 1719 } |
| 1720 |
| 1721 return node; |
| 1722 } |
| 1723 |
| 1724 |
| 1725 FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) { |
| 1726 FreeListNode* node = NULL; |
| 1727 |
| 1728 if (size_in_bytes <= kSmallAllocationMax) { |
| 1729 node = PickNodeFromList(&small_list_, node_size); |
| 1730 if (node != NULL) return node; |
| 1731 } |
| 1732 |
| 1733 if (size_in_bytes <= kMediumAllocationMax) { |
| 1734 node = PickNodeFromList(&medium_list_, node_size); |
| 1735 if (node != NULL) return node; |
| 1736 } |
| 1737 |
| 1738 if (size_in_bytes <= kLargeAllocationMax) { |
| 1739 node = PickNodeFromList(&large_list_, node_size); |
| 1740 if (node != NULL) return node; |
| 1741 } |
| 1742 |
| 1743 for (FreeListNode** cur = &huge_list_; |
| 1744 *cur != NULL; |
| 1745 cur = (*cur)->next_address()) { |
| 1746 FreeListNode* cur_node = *cur; |
| 1747 while (cur_node != NULL && |
| 1748 Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) { |
| 1749 available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size(); |
| 1750 cur_node = cur_node->next(); |
| 1751 } |
| 1752 |
| 1753 *cur = cur_node; |
| 1754 if (cur_node == NULL) break; |
| 1755 |
| 1756 ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map()); |
| 1757 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur); |
| 1758 int size = cur_as_free_space->Size(); |
| 1759 if (size >= size_in_bytes) { |
| 1760 // Large enough node found. Unlink it from the list. |
| 1761 node = *cur; |
| 1762 *node_size = size; |
| 1763 *cur = node->next(); |
| 1764 break; |
| 1765 } |
| 1766 } |
| 1767 |
| 1768 return node; |
| 1769 } |
| 1770 |
| 1771 |
| 1772 // Allocation on the old space free list. If it succeeds then a new linear |
| 1773 // allocation space has been set up with the top and limit of the space. If |
| 1774 // the allocation fails then NULL is returned, and the caller can perform a GC |
| 1775 // or allocate a new page before retrying. |
| 1776 HeapObject* FreeList::Allocate(int size_in_bytes) { |
| 1820 ASSERT(0 < size_in_bytes); | 1777 ASSERT(0 < size_in_bytes); |
| 1821 ASSERT(size_in_bytes <= kMaxBlockSize); | 1778 ASSERT(size_in_bytes <= kMaxBlockSize); |
| 1822 ASSERT(IsAligned(size_in_bytes, kPointerSize)); | 1779 ASSERT(IsAligned(size_in_bytes, kPointerSize)); |
| 1823 | 1780 // Don't free list allocate if there is linear space available. |
| 1824 if (needs_rebuild_) RebuildSizeList(); | 1781 ASSERT(owner_->limit() - owner_->top() < size_in_bytes); |
| 1825 int index = size_in_bytes >> kPointerSizeLog2; | 1782 |
| 1826 // Check for a perfect fit. | 1783 int new_node_size = 0; |
| 1827 if (free_[index].head_node_ != NULL) { | 1784 FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size); |
| 1828 FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_); | 1785 if (new_node == NULL) return NULL; |
| 1829 // If this was the last block of its size, remove the size. | 1786 |
| 1830 if ((free_[index].head_node_ = node->next(heap_)) == NULL) | 1787 available_ -= new_node_size; |
| 1831 RemoveSize(index); | 1788 ASSERT(IsVeryLong() || available_ == SumFreeLists()); |
| 1832 available_ -= size_in_bytes; | 1789 |
| 1833 *wasted_bytes = 0; | 1790 int bytes_left = new_node_size - size_in_bytes; |
| 1834 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep. | 1791 ASSERT(bytes_left >= 0); |
| 1835 return node; | 1792 |
| 1836 } | 1793 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top()); |
| 1837 // Search the size list for the best fit. | 1794 // Mark the old linear allocation area with a free space map so it can be |
| 1838 int prev = finger_ < index ? finger_ : kHead; | 1795 // skipped when scanning the heap. This also puts it back in the free list |
| 1839 int cur = FindSize(index, &prev); | 1796 // if it is big enough. |
| 1840 ASSERT(index < cur); | 1797 owner_->Free(owner_->top(), old_linear_size); |
| 1841 if (cur == kEnd) { | 1798 owner_->heap()->incremental_marking()->OldSpaceStep( |
| 1842 // No large enough size in list. | 1799 size_in_bytes - old_linear_size); |
| 1843 *wasted_bytes = 0; | 1800 |
| 1844 return Failure::RetryAfterGC(owner_); | 1801 const int kThreshold = IncrementalMarking::kAllocatedThreshold; |
| 1845 } | 1802 |
| 1846 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep. | 1803 // Memory in the linear allocation area is counted as allocated. We may free |
| 1847 int rem = cur - index; | 1804 // a little of this again immediately - see below. |
| 1848 int rem_bytes = rem << kPointerSizeLog2; | 1805 owner_->Allocate(new_node_size); |
| 1849 FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_); | 1806 |
| 1850 ASSERT(cur_node->Size() == (cur << kPointerSizeLog2)); | 1807 if (bytes_left > kThreshold && |
| 1851 FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ + | 1808 owner_->heap()->incremental_marking()->IsMarkingIncomplete() && |
| 1852 size_in_bytes); | 1809 FLAG_incremental_marking_steps) { |
| 1853 // Distinguish the cases prev < rem < cur and rem <= prev < cur | 1810 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold); |
| 1854 // to avoid many redundant tests and calls to Insert/RemoveSize. | 1811 // We don't want to give too large linear areas to the allocator while |
| 1855 if (prev < rem) { | 1812 // incremental marking is going on, because we won't check again whether |
| 1856 // Simple case: insert rem between prev and cur. | 1813 // we want to do another increment until the linear area is used up. |
| 1857 finger_ = prev; | 1814 owner_->Free(new_node->address() + size_in_bytes + linear_size, |
| 1858 free_[prev].next_size_ = rem; | 1815 new_node_size - size_in_bytes - linear_size); |
| 1859 // If this was the last block of size cur, remove the size. | 1816 owner_->SetTop(new_node->address() + size_in_bytes, |
| 1860 if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) { | 1817 new_node->address() + size_in_bytes + linear_size); |
| 1861 free_[rem].next_size_ = free_[cur].next_size_; | 1818 } else if (bytes_left > 0) { |
| 1862 } else { | 1819 // Normally we give the rest of the node to the allocator as its new |
| 1863 free_[rem].next_size_ = cur; | 1820 // linear allocation area. |
| 1821 owner_->SetTop(new_node->address() + size_in_bytes, |
| 1822 new_node->address() + new_node_size); |
| 1823 } else { |
| 1824 // TODO(gc) Try not freeing linear allocation region when bytes_left |
| 1825 // are zero. |
| 1826 owner_->SetTop(NULL, NULL); |
| 1827 } |
| 1828 |
| 1829 return new_node; |
| 1830 } |
| 1831 |
| 1832 |
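Editor note: the comment on FreeList::Allocate above spells out its contract: callers bump-allocate out of the current linear area first, fall back to the free list, and on NULL either collect garbage or add a page before retrying. A toy model of that caller-side cascade, with invented names standing in for the real inline fast path:

    #include <cstddef>  // NULL

    // Toy model only; the names are illustrative, not real V8 entry points.
    struct ToySpace {
      char* top;
      char* limit;

      // Fast path: bump-pointer allocation inside the current linear area.
      char* AllocateLinearly(int size_in_bytes) {
        if (limit - top < size_in_bytes) return NULL;
        char* result = top;
        top += size_in_bytes;
        return result;
      }

      // Stand-ins for the slow paths. In the real code, FreeList::Allocate()
      // also re-seeds top/limit from the remainder of the chosen block.
      char* AllocateFromFreeList(int size_in_bytes) { return NULL; }
      bool ExpandOrRequestGC() { return false; }

      char* AllocateRaw(int size_in_bytes) {
        char* result = AllocateLinearly(size_in_bytes);
        if (result != NULL) return result;
        result = AllocateFromFreeList(size_in_bytes);
        if (result != NULL) return result;
        // NULL from the free list means: perform a GC or add a page, then retry.
        if (ExpandOrRequestGC()) result = AllocateFromFreeList(size_in_bytes);
        return result;
      }
    };

Note that in the patch a successful free-list hit also refills the linear area via SetTop(), so subsequent allocations return to the bump-pointer fast path until that remainder is used up.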
| 1833 static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) { |
| 1834 intptr_t sum = 0; |
| 1835 while (n != NULL) { |
| 1836 if (Page::FromAddress(n->address()) == p) { |
| 1837 FreeSpace* free_space = reinterpret_cast<FreeSpace*>(n); |
| 1838 sum += free_space->Size(); |
| 1864 } | 1839 } |
| 1865 // Add the remainder block. | 1840 n = n->next(); |
| 1866 rem_node->set_size(heap_, rem_bytes); | 1841 } |
| 1867 rem_node->set_next(heap_, free_[rem].head_node_); | 1842 return sum; |
| 1868 free_[rem].head_node_ = rem_node->address(); | 1843 } |
| 1869 } else { | 1844 |
| 1870 // If this was the last block of size cur, remove the size. | 1845 |
| 1871 if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) { | 1846 void FreeList::CountFreeListItems(Page* p, intptr_t* sizes) { |
| 1872 finger_ = prev; | 1847 sizes[0] = CountFreeListItemsInList(small_list_, p); |
| 1873 free_[prev].next_size_ = free_[cur].next_size_; | 1848 sizes[1] = CountFreeListItemsInList(medium_list_, p); |
| 1874 } | 1849 sizes[2] = CountFreeListItemsInList(large_list_, p); |
| 1875 if (rem_bytes < kMinBlockSize) { | 1850 sizes[3] = CountFreeListItemsInList(huge_list_, p); |
| 1876 // Too-small remainder is wasted. | 1851 } |
| 1877 rem_node->set_size(heap_, rem_bytes); | |
| 1878 available_ -= size_in_bytes + rem_bytes; | |
| 1879 *wasted_bytes = rem_bytes; | |
| 1880 return cur_node; | |
| 1881 } | |
| 1882 // Add the remainder block and, if needed, insert its size. | |
| 1883 rem_node->set_size(heap_, rem_bytes); | |
| 1884 rem_node->set_next(heap_, free_[rem].head_node_); | |
| 1885 free_[rem].head_node_ = rem_node->address(); | |
| 1886 if (rem_node->next(heap_) == NULL) InsertSize(rem); | |
| 1887 } | |
| 1888 available_ -= size_in_bytes; | |
| 1889 *wasted_bytes = 0; | |
| 1890 return cur_node; | |
| 1891 } | |
| 1892 | |
| 1893 | |
| 1894 void OldSpaceFreeList::MarkNodes() { | |
| 1895 for (int i = 0; i < kFreeListsLength; i++) { | |
| 1896 Address cur_addr = free_[i].head_node_; | |
| 1897 while (cur_addr != NULL) { | |
| 1898 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr); | |
| 1899 cur_addr = cur_node->next(heap_); | |
| 1900 cur_node->SetMark(); | |
| 1901 } | |
| 1902 } | |
| 1903 } | |
| 1904 | |
| 1905 | 1852 |
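Editor note: CountFreeListItems() above reports, for one page, how many free bytes each of the four lists currently holds there, giving the collector a cheap per-page estimate of free space. A short usage fragment, with invented local names:

    // Usage fragment (locals are invented for illustration): total free bytes
    // that the four lists currently hold on a given page.
    intptr_t sizes[4];  // filled as: small, medium, large, huge
    free_list_.CountFreeListItems(p, sizes);
    intptr_t free_bytes_on_page = sizes[0] + sizes[1] + sizes[2] + sizes[3];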
| 1906 #ifdef DEBUG | 1853 #ifdef DEBUG |
| 1907 bool OldSpaceFreeList::Contains(FreeListNode* node) { | 1854 intptr_t FreeList::SumFreeList(FreeListNode* cur) { |
| 1908 for (int i = 0; i < kFreeListsLength; i++) { | 1855 intptr_t sum = 0; |
| 1909 Address cur_addr = free_[i].head_node_; | 1856 while (cur != NULL) { |
| 1910 while (cur_addr != NULL) { | 1857 ASSERT(cur->map() == HEAP->raw_unchecked_free_space_map()); |
| 1911 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr); | 1858 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur); |
| 1912 if (cur_node == node) return true; | 1859 sum += cur_as_free_space->Size(); |
| 1913 cur_addr = cur_node->next(heap_); | 1860 cur = cur->next(); |
| 1914 } | 1861 } |
| 1915 } | 1862 return sum; |
| 1863 } |
| 1864 |
| 1865 |
| 1866 static const int kVeryLongFreeList = 500; |
| 1867 |
| 1868 |
| 1869 int FreeList::FreeListLength(FreeListNode* cur) { |
| 1870 int length = 0; |
| 1871 while (cur != NULL) { |
| 1872 length++; |
| 1873 cur = cur->next(); |
| 1874 if (length == kVeryLongFreeList) return length; |
| 1875 } |
| 1876 return length; |
| 1877 } |
| 1878 |
| 1879 |
| 1880 bool FreeList::IsVeryLong() { |
| 1881 if (FreeListLength(small_list_) == kVeryLongFreeList) return true; |
| 1882 if (FreeListLength(medium_list_) == kVeryLongFreeList) return true; |
| 1883 if (FreeListLength(large_list_) == kVeryLongFreeList) return true; |
| 1884 if (FreeListLength(huge_list_) == kVeryLongFreeList) return true; |
| 1916 return false; | 1885 return false; |
| 1917 } | 1886 } |
| 1887 |
| 1888 |
| 1889 // This can take a very long time because it is linear in the number of entries |
| 1890 // on the free list, so it should not be called if FreeListLength returns |
| 1891 // kVeryLongFreeList. |
| 1892 intptr_t FreeList::SumFreeLists() { |
| 1893 intptr_t sum = SumFreeList(small_list_); |
| 1894 sum += SumFreeList(medium_list_); |
| 1895 sum += SumFreeList(large_list_); |
| 1896 sum += SumFreeList(huge_list_); |
| 1897 return sum; |
| 1898 } |
| 1918 #endif | 1899 #endif |
| 1919 | 1900 |
| 1920 | 1901 |
| 1921 FixedSizeFreeList::FixedSizeFreeList(Heap* heap, | |
| 1922 AllocationSpace owner, | |
| 1923 int object_size) | |
| 1924 : heap_(heap), owner_(owner), object_size_(object_size) { | |
| 1925 Reset(); | |
| 1926 } | |
| 1927 | |
| 1928 | |
| 1929 void FixedSizeFreeList::Reset() { | |
| 1930 available_ = 0; | |
| 1931 head_ = tail_ = NULL; | |
| 1932 } | |
| 1933 | |
| 1934 | |
| 1935 void FixedSizeFreeList::Free(Address start) { | |
| 1936 #ifdef DEBUG | |
| 1937 Isolate::Current()->memory_allocator()->ZapBlock(start, object_size_); | |
| 1938 #endif | |
| 1939 // We only use the freelists with mark-sweep. | |
| 1940 ASSERT(!HEAP->mark_compact_collector()->IsCompacting()); | |
| 1941 FreeListNode* node = FreeListNode::FromAddress(start); | |
| 1942 node->set_size(heap_, object_size_); | |
| 1943 node->set_next(heap_, NULL); | |
| 1944 if (head_ == NULL) { | |
| 1945 tail_ = head_ = node->address(); | |
| 1946 } else { | |
| 1947 FreeListNode::FromAddress(tail_)->set_next(heap_, node->address()); | |
| 1948 tail_ = node->address(); | |
| 1949 } | |
| 1950 available_ += object_size_; | |
| 1951 } | |
| 1952 | |
| 1953 | |
| 1954 MaybeObject* FixedSizeFreeList::Allocate() { | |
| 1955 if (head_ == NULL) { | |
| 1956 return Failure::RetryAfterGC(owner_); | |
| 1957 } | |
| 1958 | |
| 1959 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep. | |
| 1960 FreeListNode* node = FreeListNode::FromAddress(head_); | |
| 1961 head_ = node->next(heap_); | |
| 1962 available_ -= object_size_; | |
| 1963 return node; | |
| 1964 } | |
| 1965 | |
| 1966 | |
| 1967 void FixedSizeFreeList::MarkNodes() { | |
| 1968 Address cur_addr = head_; | |
| 1969 while (cur_addr != NULL && cur_addr != tail_) { | |
| 1970 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr); | |
| 1971 cur_addr = cur_node->next(heap_); | |
| 1972 cur_node->SetMark(); | |
| 1973 } | |
| 1974 } | |
| 1975 | |
| 1976 | |
| 1977 // ----------------------------------------------------------------------------- | 1902 // ----------------------------------------------------------------------------- |
| 1978 // OldSpace implementation | 1903 // OldSpace implementation |
| 1979 | 1904 |
| 1980 void OldSpace::PrepareForMarkCompact(bool will_compact) { | |
| 1981 // Call prepare of the super class. | |
| 1982 PagedSpace::PrepareForMarkCompact(will_compact); | |
| 1983 | |
| 1984 if (will_compact) { | |
| 1985 // Reset relocation info. During a compacting collection, everything in | |
| 1986 // the space is considered 'available' and we will rediscover live data | |
| 1987 // and waste during the collection. | |
| 1988 MCResetRelocationInfo(); | |
| 1989 ASSERT(Available() == Capacity()); | |
| 1990 } else { | |
| 1991 // During a non-compacting collection, everything below the linear | |
| 1992 // allocation pointer is considered allocated (everything above is | |
| 1993 // available) and we will rediscover available and wasted bytes during | |
| 1994 // the collection. | |
| 1995 accounting_stats_.AllocateBytes(free_list_.available()); | |
| 1996 accounting_stats_.FillWastedBytes(Waste()); | |
| 1997 } | |
| 1998 | |
| 1999 // Clear the free list before a full GC---it will be rebuilt afterward. | |
| 2000 free_list_.Reset(); | |
| 2001 } | |
| 2002 | |
| 2003 | |
| 2004 void OldSpace::MCCommitRelocationInfo() { | |
| 2005 // Update fast allocation info. | |
| 2006 allocation_info_.top = mc_forwarding_info_.top; | |
| 2007 allocation_info_.limit = mc_forwarding_info_.limit; | |
| 2008 ASSERT(allocation_info_.VerifyPagedAllocation()); | |
| 2009 | |
| 2010 // The space is compacted and we haven't yet built free lists or | |
| 2011 // wasted any space. | |
| 2012 ASSERT(Waste() == 0); | |
| 2013 ASSERT(AvailableFree() == 0); | |
| 2014 | |
| 2015 // Build the free list for the space. | |
| 2016 int computed_size = 0; | |
| 2017 PageIterator it(this, PageIterator::PAGES_USED_BY_MC); | |
| 2018 while (it.has_next()) { | |
| 2019 Page* p = it.next(); | |
| 2020 // Space below the relocation pointer is allocated. | |
| 2021 computed_size += | |
| 2022 static_cast<int>(p->AllocationWatermark() - p->ObjectAreaStart()); | |
| 2023 if (it.has_next()) { | |
| 2024 // Free the space at the top of the page. | |
| 2025 int extra_size = | |
| 2026 static_cast<int>(p->ObjectAreaEnd() - p->AllocationWatermark()); | |
| 2027 if (extra_size > 0) { | |
| 2028 int wasted_bytes = free_list_.Free(p->AllocationWatermark(), | |
| 2029 extra_size); | |
| 2030 // The bytes we have just "freed" to add to the free list were | |
| 2031 // already accounted as available. | |
| 2032 accounting_stats_.WasteBytes(wasted_bytes); | |
| 2033 } | |
| 2034 } | |
| 2035 } | |
| 2036 | |
| 2037 // Make sure the computed size - based on the used portion of the pages in | |
| 2038 // use - matches the size obtained while computing forwarding addresses. | |
| 2039 ASSERT(computed_size == Size()); | |
| 2040 } | |
| 2041 | |
| 2042 | |
| 2043 bool NewSpace::ReserveSpace(int bytes) { | 1905 bool NewSpace::ReserveSpace(int bytes) { |
| 2044 // We can't reliably unpack a partial snapshot that needs more new space | 1906 // We can't reliably unpack a partial snapshot that needs more new space |
| 2045 // than the minimum NewSpace size. | 1907 // than the minimum NewSpace size. |
| 2046 ASSERT(bytes <= InitialCapacity()); | 1908 ASSERT(bytes <= InitialCapacity()); |
| 2047 Address limit = allocation_info_.limit; | 1909 Address limit = allocation_info_.limit; |
| 2048 Address top = allocation_info_.top; | 1910 Address top = allocation_info_.top; |
| 2049 return limit - top >= bytes; | 1911 return limit - top >= bytes; |
| 2050 } | 1912 } |
| 2051 | 1913 |
| 2052 | 1914 |
| 2053 void PagedSpace::FreePages(Page* prev, Page* last) { | 1915 void PagedSpace::PrepareForMarkCompact() { |
| 2054 if (last == AllocationTopPage()) { | 1916 // We don't have a linear allocation area while sweeping. It will be restored |
| 2055 // Pages are already at the end of used pages. | 1917 // on the first allocation after the sweep. |
| 2056 return; | 1918 // Mark the old linear allocation area with a free space map so it can be |
| 1919 // skipped when scanning the heap. |
| 1920 int old_linear_size = static_cast<int>(limit() - top()); |
| 1921 Free(top(), old_linear_size); |
| 1922 SetTop(NULL, NULL); |
| 1923 |
| 1924 // Stop lazy sweeping and clear marking bits for unswept pages. |
| 1925 if (first_unswept_page_ != NULL) { |
| 1926 Page* last = last_unswept_page_; |
| 1927 Page* p = first_unswept_page_; |
| 1928 do { |
| 1929 // Do not use ShouldBeSweptLazily predicate here. |
| 1930 // New evacuation candidates were selected but they still have |
| 1931 // to be swept before collection starts. |
| 1932 if (!p->WasSwept()) { |
| 1933 Bitmap::Clear(p); |
| 1934 if (FLAG_gc_verbose) { |
| 1935 PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n", |
| 1936 reinterpret_cast<intptr_t>(p)); |
| 1937 } |
| 1938 } |
| 1939 p = p->next_page(); |
| 1940 } while (p != last); |
| 2057 } | 1941 } |
| 1942 first_unswept_page_ = last_unswept_page_ = Page::FromAddress(NULL); |
| 2058 | 1943 |
| 2059 Page* first = NULL; | 1944 // Clear the free list before a full GC---it will be rebuilt afterward. |
| 2060 | 1945 free_list_.Reset(); |
| 2061 // Remove pages from the list. | |
| 2062 if (prev == NULL) { | |
| 2063 first = first_page_; | |
| 2064 first_page_ = last->next_page(); | |
| 2065 } else { | |
| 2066 first = prev->next_page(); | |
| 2067 heap()->isolate()->memory_allocator()->SetNextPage( | |
| 2068 prev, last->next_page()); | |
| 2069 } | |
| 2070 | |
| 2071 // Attach it after the last page. | |
| 2072 heap()->isolate()->memory_allocator()->SetNextPage(last_page_, first); | |
| 2073 last_page_ = last; | |
| 2074 heap()->isolate()->memory_allocator()->SetNextPage(last, NULL); | |
| 2075 | |
| 2076 // Clean them up. | |
| 2077 do { | |
| 2078 first->InvalidateWatermark(true); | |
| 2079 first->SetAllocationWatermark(first->ObjectAreaStart()); | |
| 2080 first->SetCachedAllocationWatermark(first->ObjectAreaStart()); | |
| 2081 first->SetRegionMarks(Page::kAllRegionsCleanMarks); | |
| 2082 first = first->next_page(); | |
| 2083 } while (first != NULL); | |
| 2084 | |
| 2085 // Order of pages in this space might no longer be consistent with | |
| 2086 // order of pages in chunks. | |
| 2087 page_list_is_chunk_ordered_ = false; | |
| 2088 } | 1946 } |
| 2089 | 1947 |
| 2090 | 1948 |
| 2091 void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) { | 1949 bool PagedSpace::ReserveSpace(int size_in_bytes) { |
| 2092 const bool add_to_freelist = true; | 1950 ASSERT(size_in_bytes <= Page::kMaxHeapObjectSize); |
| 1951 ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes)); |
| 1952 Address current_top = allocation_info_.top; |
| 1953 Address new_top = current_top + size_in_bytes; |
| 1954 if (new_top <= allocation_info_.limit) return true; |
| 2093 | 1955 |
| 2094 // Mark used and unused pages to properly fill unused pages | 1956 HeapObject* new_area = free_list_.Allocate(size_in_bytes); |
| 2095 // after reordering. | 1957 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes); |
| 2096 PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES); | 1958 if (new_area == NULL) return false; |
| 2097 Page* last_in_use = AllocationTopPage(); | |
| 2098 bool in_use = true; | |
| 2099 | 1959 |
| 2100 while (all_pages_iterator.has_next()) { | 1960 int old_linear_size = static_cast<int>(limit() - top()); |
| 2101 Page* p = all_pages_iterator.next(); | 1961 // Mark the old linear allocation area with a free space so it can be |
| 2102 p->SetWasInUseBeforeMC(in_use); | 1962 // skipped when scanning the heap. This also puts it back in the free list |
| 2103 if (p == last_in_use) { | 1963 // if it is big enough. |
| 2104 // We passed a page containing allocation top. All consequent | 1964 Free(top(), old_linear_size); |
| 2105 // pages are not used. | |
| 2106 in_use = false; | |
| 2107 } | |
| 2108 } | |
| 2109 | 1965 |
| 2110 if (page_list_is_chunk_ordered_) return; | 1966 SetTop(new_area->address(), new_area->address() + size_in_bytes); |
| 2111 | 1967 Allocate(size_in_bytes); |
| 2112 Page* new_last_in_use = Page::FromAddress(NULL); | |
| 2113 heap()->isolate()->memory_allocator()->RelinkPageListInChunkOrder( | |
| 2114 this, &first_page_, &last_page_, &new_last_in_use); | |
| 2115 ASSERT(new_last_in_use->is_valid()); | |
| 2116 | |
| 2117 if (new_last_in_use != last_in_use) { | |
| 2118 // Current allocation top points to a page which is now in the middle | |
| 2119 // of page list. We should move allocation top forward to the new last | |
| 2120 // used page so various object iterators will continue to work properly. | |
| 2121 int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) - | |
| 2122 last_in_use->AllocationTop()); | |
| 2123 | |
| 2124 last_in_use->SetAllocationWatermark(last_in_use->AllocationTop()); | |
| 2125 if (size_in_bytes > 0) { | |
| 2126 Address start = last_in_use->AllocationTop(); | |
| 2127 if (deallocate_blocks) { | |
| 2128 accounting_stats_.AllocateBytes(size_in_bytes); | |
| 2129 DeallocateBlock(start, size_in_bytes, add_to_freelist); | |
| 2130 } else { | |
| 2131 heap()->CreateFillerObjectAt(start, size_in_bytes); | |
| 2132 } | |
| 2133 } | |
| 2134 | |
| 2135 // New last in use page was in the middle of the list before | |
| 2136 // sorting so it is full. | |
| 2137 SetTop(new_last_in_use->AllocationTop()); | |
| 2138 | |
| 2139 ASSERT(AllocationTopPage() == new_last_in_use); | |
| 2140 ASSERT(AllocationTopPage()->WasInUseBeforeMC()); | |
| 2141 } | |
| 2142 | |
| 2143 PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE); | |
| 2144 while (pages_in_use_iterator.has_next()) { | |
| 2145 Page* p = pages_in_use_iterator.next(); | |
| 2146 if (!p->WasInUseBeforeMC()) { | |
| 2147 // Empty page is in the middle of a sequence of used pages. | |
| 2148 // Allocate it as a whole and deallocate immediately. | |
| 2149 int size_in_bytes = static_cast<int>(PageAllocationLimit(p) - | |
| 2150 p->ObjectAreaStart()); | |
| 2151 | |
| 2152 p->SetAllocationWatermark(p->ObjectAreaStart()); | |
| 2153 Address start = p->ObjectAreaStart(); | |
| 2154 if (deallocate_blocks) { | |
| 2155 accounting_stats_.AllocateBytes(size_in_bytes); | |
| 2156 DeallocateBlock(start, size_in_bytes, add_to_freelist); | |
| 2157 } else { | |
| 2158 heap()->CreateFillerObjectAt(start, size_in_bytes); | |
| 2159 } | |
| 2160 } | |
| 2161 } | |
| 2162 | |
| 2163 page_list_is_chunk_ordered_ = true; | |
| 2164 } | |
| 2165 | |
| 2166 | |
| 2167 void PagedSpace::PrepareForMarkCompact(bool will_compact) { | |
| 2168 if (will_compact) { | |
| 2169 RelinkPageListInChunkOrder(false); | |
| 2170 } | |
| 2171 } | |
| 2172 | |
| 2173 | |
| 2174 bool PagedSpace::ReserveSpace(int bytes) { | |
| 2175 Address limit = allocation_info_.limit; | |
| 2176 Address top = allocation_info_.top; | |
| 2177 if (limit - top >= bytes) return true; | |
| 2178 | |
| 2179 // There wasn't enough space in the current page. Lets put the rest | |
| 2180 // of the page on the free list and start a fresh page. | |
| 2181 PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_)); | |
| 2182 | |
| 2183 Page* reserved_page = TopPageOf(allocation_info_); | |
| 2184 int bytes_left_to_reserve = bytes; | |
| 2185 while (bytes_left_to_reserve > 0) { | |
| 2186 if (!reserved_page->next_page()->is_valid()) { | |
| 2187 if (heap()->OldGenerationAllocationLimitReached()) return false; | |
| 2188 Expand(reserved_page); | |
| 2189 } | |
| 2190 bytes_left_to_reserve -= Page::kPageSize; | |
| 2191 reserved_page = reserved_page->next_page(); | |
| 2192 if (!reserved_page->is_valid()) return false; | |
| 2193 } | |
| 2194 ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid()); | |
| 2195 TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true); | |
| 2196 SetAllocationInfo(&allocation_info_, | |
| 2197 TopPageOf(allocation_info_)->next_page()); | |
| 2198 return true; | 1968 return true; |
| 2199 } | 1969 } |
| 2200 | 1970 |
| 2201 | 1971 |
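Editor note: ReserveSpace() gives callers such as the partial-snapshot unpacking mentioned in the NewSpace comment above a guarantee: once it returns true, the requested bytes can be bump-allocated in this space without further checks. A hedged sketch of that calling pattern (the function name is invented):

    // Hedged sketch: reserve before writing, so the subsequent writes can
    // bump-allocate without any further checks.
    bool ReserveBeforeUnpacking(PagedSpace* space, int bytes) {
      if (!space->ReserveSpace(bytes)) {
        // Not enough room even after consulting the free list and the slow
        // allocation path; the caller has to collect garbage or give up.
        return false;
      }
      // ReserveSpace() has re-seeded top/limit, so `bytes` of linear
      // allocation in this space cannot fail now.
      return true;
    }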
| 2202 // You have to call this last, since the implementation from PagedSpace | 1972 // You have to call this last, since the implementation from PagedSpace |
| 2203 // doesn't know that memory was 'promised' to large object space. | 1973 // doesn't know that memory was 'promised' to large object space. |
| 2204 bool LargeObjectSpace::ReserveSpace(int bytes) { | 1974 bool LargeObjectSpace::ReserveSpace(int bytes) { |
| 2205 return heap()->OldGenerationSpaceAvailable() >= bytes; | 1975 return heap()->OldGenerationSpaceAvailable() >= bytes; |
| 2206 } | 1976 } |
| 2207 | 1977 |
| 2208 | 1978 |
| 2209 // Slow case for normal allocation. Try in order: (1) allocate in the next | 1979 bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) { |
| 2210 // page in the space, (2) allocate off the space's free list, (3) expand the | 1980 if (IsSweepingComplete()) return true; |
| 2211 // space, (4) fail. | 1981 |
| 2212 HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) { | 1982 intptr_t freed_bytes = 0; |
| 2213 // Linear allocation in this space has failed. If there is another page | 1983 Page* last = last_unswept_page_; |
| 2214 // in the space, move to that page and allocate there. This allocation | 1984 Page* p = first_unswept_page_; |
| 2215 // should succeed (size_in_bytes should not be greater than a page's | 1985 do { |
| 2216 // object area size). | 1986 Page* next_page = p->next_page(); |
| 2217 Page* current_page = TopPageOf(allocation_info_); | 1987 if (ShouldBeSweptLazily(p)) { |
| 2218 if (current_page->next_page()->is_valid()) { | 1988 if (FLAG_gc_verbose) { |
| 2219 return AllocateInNextPage(current_page, size_in_bytes); | 1989 PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n", |
| 1990 reinterpret_cast<intptr_t>(p)); |
| 1991 } |
| 1992 freed_bytes += MarkCompactCollector::SweepConservatively(this, p); |
| 1993 } |
| 1994 p = next_page; |
| 1995 } while (p != last && freed_bytes < bytes_to_sweep); |
| 1996 |
| 1997 if (p == last) { |
| 1998 last_unswept_page_ = first_unswept_page_ = Page::FromAddress(NULL); |
| 1999 } else { |
| 2000 first_unswept_page_ = p; |
| 2220 } | 2001 } |
| 2221 | 2002 |
| 2222 // There is no next page in this space. Try free list allocation unless that | 2003 heap()->LowerOldGenLimits(freed_bytes); |
| 2223 // is currently forbidden. | |
| 2224 if (!heap()->linear_allocation()) { | |
| 2225 int wasted_bytes; | |
| 2226 Object* result; | |
| 2227 MaybeObject* maybe = free_list_.Allocate(size_in_bytes, &wasted_bytes); | |
| 2228 accounting_stats_.WasteBytes(wasted_bytes); | |
| 2229 if (maybe->ToObject(&result)) { | |
| 2230 accounting_stats_.AllocateBytes(size_in_bytes); | |
| 2231 | 2004 |
| 2232 HeapObject* obj = HeapObject::cast(result); | 2005 heap()->FreeQueuedChunks(); |
| 2233 Page* p = Page::FromAddress(obj->address()); | |
| 2234 | 2006 |
| 2235 if (obj->address() >= p->AllocationWatermark()) { | 2007 return IsSweepingComplete(); |
| 2236 // There should be no hole between the allocation watermark | 2008 } |
| 2237 // and allocated object address. | |
| 2238 // Memory above the allocation watermark was not swept and | |
| 2239 // might contain garbage pointers to new space. | |
| 2240 ASSERT(obj->address() == p->AllocationWatermark()); | |
| 2241 p->SetAllocationWatermark(obj->address() + size_in_bytes); | |
| 2242 } | |
| 2243 | 2009 |
| 2244 return obj; | 2010 |
| 2245 } | 2011 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() { |
| 2012 if (allocation_info_.top >= allocation_info_.limit) return; |
| 2013 |
| 2014 if (Page::FromAddress(allocation_info_.top)->IsEvacuationCandidate()) { |
| 2015 // Create filler object to keep page iterable if it was iterable. |
| 2016 int remaining = |
| 2017 static_cast<int>(allocation_info_.limit - allocation_info_.top); |
| 2018 heap()->CreateFillerObjectAt(allocation_info_.top, remaining); |
| 2019 |
| 2020 allocation_info_.top = NULL; |
| 2021 allocation_info_.limit = NULL; |
| 2246 } | 2022 } |
| 2023 } |
| 2024 |
| 2025 |
| 2026 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) { |
| 2027 // Allocation in this space has failed. |
| 2247 | 2028 |
| 2248 // Free list allocation failed and there is no next page. Fail if we have | 2029 // Free list allocation failed and there is no next page. Fail if we have |
| 2249 // hit the old generation size limit that should cause a garbage | 2030 // hit the old generation size limit that should cause a garbage |
| 2250 // collection. | 2031 // collection. |
| 2251 if (!heap()->always_allocate() && | 2032 if (!heap()->always_allocate() && |
| 2252 heap()->OldGenerationAllocationLimitReached()) { | 2033 heap()->OldGenerationAllocationLimitReached()) { |
| 2253 return NULL; | 2034 return NULL; |
| 2254 } | 2035 } |
| 2255 | 2036 |
| 2037 // If there are unswept pages advance lazy sweeper. |
| 2038 if (first_unswept_page_->is_valid()) { |
| 2039 AdvanceSweeper(size_in_bytes); |
| 2040 |
| 2041 // Retry the free list allocation. |
| 2042 HeapObject* object = free_list_.Allocate(size_in_bytes); |
| 2043 if (object != NULL) return object; |
| 2044 |
| 2045 if (!IsSweepingComplete()) { |
| 2046 AdvanceSweeper(kMaxInt); |
| 2047 |
| 2048 // Retry the free list allocation. |
| 2049 object = free_list_.Allocate(size_in_bytes); |
| 2050 if (object != NULL) return object; |
| 2051 } |
| 2052 } |
| 2053 |
| 2256 // Try to expand the space and allocate in the new next page. | 2054 // Try to expand the space and allocate in the new next page. |
| 2257 ASSERT(!current_page->next_page()->is_valid()); | 2055 if (Expand()) { |
| 2258 if (Expand(current_page)) { | 2056 return free_list_.Allocate(size_in_bytes); |
| 2259 return AllocateInNextPage(current_page, size_in_bytes); | |
| 2260 } | 2057 } |
| 2261 | 2058 |
| 2262 // Finally, fail. | 2059 // Finally, fail. |
| 2263 return NULL; | 2060 return NULL; |
| 2264 } | 2061 } |
| 2265 | 2062 |
| 2266 | 2063 |
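Editor note: the new SlowAllocateRaw() above ties allocation to lazy sweeping: a request that misses the free list first advances the sweeper by roughly size_in_bytes worth of unswept pages, retries, then sweeps everything or expands. A standalone toy of the sweeping-budget idea, not the real sweeper:

    #include <stdint.h>
    #include <vector>

    struct ToyPage {
      intptr_t free_bytes;  // bytes a sweep of this page would recover
      bool swept;
    };

    // Sweep unswept pages until at least `budget` bytes have been freed, then
    // stop; the next allocation that misses the free list continues from here.
    intptr_t AdvanceToySweeper(std::vector<ToyPage>* pages, intptr_t budget) {
      intptr_t freed = 0;
      for (size_t i = 0; i < pages->size() && freed < budget; i++) {
        ToyPage& p = (*pages)[i];
        if (p.swept) continue;
        p.swept = true;           // "sweeping" discovers the page's free bytes
        freed += p.free_bytes;
      }
      return freed;               // bytes now sitting on the free list
    }

This spreads the cost of sweeping across allocations instead of paying it all at the end of a collection.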
| 2267 void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) { | |
| 2268 current_page->SetAllocationWatermark(allocation_info_.top); | |
| 2269 int free_size = | |
| 2270 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top); | |
| 2271 if (free_size > 0) { | |
| 2272 int wasted_bytes = free_list_.Free(allocation_info_.top, free_size); | |
| 2273 accounting_stats_.WasteBytes(wasted_bytes); | |
| 2274 } | |
| 2275 } | |
| 2276 | |
| 2277 | |
| 2278 void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) { | |
| 2279 current_page->SetAllocationWatermark(allocation_info_.top); | |
| 2280 int free_size = | |
| 2281 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top); | |
| 2282 // In the fixed space free list all the free list items have the right size. | |
| 2283 // We use up the rest of the page while preserving this invariant. | |
| 2284 while (free_size >= object_size_in_bytes_) { | |
| 2285 free_list_.Free(allocation_info_.top); | |
| 2286 allocation_info_.top += object_size_in_bytes_; | |
| 2287 free_size -= object_size_in_bytes_; | |
| 2288 accounting_stats_.WasteBytes(object_size_in_bytes_); | |
| 2289 } | |
| 2290 } | |
| 2291 | |
| 2292 | |
| 2293 // Add the block at the top of the page to the space's free list, set the | |
| 2294 // allocation info to the next page (assumed to be one), and allocate | |
| 2295 // linearly there. | |
| 2296 HeapObject* OldSpace::AllocateInNextPage(Page* current_page, | |
| 2297 int size_in_bytes) { | |
| 2298 ASSERT(current_page->next_page()->is_valid()); | |
| 2299 Page* next_page = current_page->next_page(); | |
| 2300 next_page->ClearGCFields(); | |
| 2301 PutRestOfCurrentPageOnFreeList(current_page); | |
| 2302 SetAllocationInfo(&allocation_info_, next_page); | |
| 2303 return AllocateLinearly(&allocation_info_, size_in_bytes); | |
| 2304 } | |
| 2305 | |
| 2306 | |
| 2307 void OldSpace::DeallocateBlock(Address start, | |
| 2308 int size_in_bytes, | |
| 2309 bool add_to_freelist) { | |
| 2310 Free(start, size_in_bytes, add_to_freelist); | |
| 2311 } | |
| 2312 | |
| 2313 | |
| 2314 #ifdef DEBUG | 2064 #ifdef DEBUG |
| 2315 void PagedSpace::ReportCodeStatistics() { | 2065 void PagedSpace::ReportCodeStatistics() { |
| 2316 Isolate* isolate = Isolate::Current(); | 2066 Isolate* isolate = Isolate::Current(); |
| 2317 CommentStatistic* comments_statistics = | 2067 CommentStatistic* comments_statistics = |
| 2318 isolate->paged_space_comments_statistics(); | 2068 isolate->paged_space_comments_statistics(); |
| 2319 ReportCodeKindStatistics(); | 2069 ReportCodeKindStatistics(); |
| 2320 PrintF("Code comment statistics (\" [ comment-txt : size/ " | 2070 PrintF("Code comment statistics (\" [ comment-txt : size/ " |
| 2321 "count (average)\"):\n"); | 2071 "count (average)\"):\n"); |
| 2322 for (int i = 0; i <= CommentStatistic::kMaxComments; i++) { | 2072 for (int i = 0; i <= CommentStatistic::kMaxComments; i++) { |
| 2323 const CommentStatistic& cs = comments_statistics[i]; | 2073 const CommentStatistic& cs = comments_statistics[i]; |
| (...skipping 82 matching lines...) |
| 2406 EnterComment(isolate, comment_txt, flat_delta); | 2156 EnterComment(isolate, comment_txt, flat_delta); |
| 2407 } | 2157 } |
| 2408 | 2158 |
| 2409 | 2159 |
| 2410 // Collects code size statistics: | 2160 // Collects code size statistics: |
| 2411 // - by code kind | 2161 // - by code kind |
| 2412 // - by code comment | 2162 // - by code comment |
| 2413 void PagedSpace::CollectCodeStatistics() { | 2163 void PagedSpace::CollectCodeStatistics() { |
| 2414 Isolate* isolate = heap()->isolate(); | 2164 Isolate* isolate = heap()->isolate(); |
| 2415 HeapObjectIterator obj_it(this); | 2165 HeapObjectIterator obj_it(this); |
| 2416 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { | 2166 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) { |
| 2417 if (obj->IsCode()) { | 2167 if (obj->IsCode()) { |
| 2418 Code* code = Code::cast(obj); | 2168 Code* code = Code::cast(obj); |
| 2419 isolate->code_kind_statistics()[code->kind()] += code->Size(); | 2169 isolate->code_kind_statistics()[code->kind()] += code->Size(); |
| 2420 RelocIterator it(code); | 2170 RelocIterator it(code); |
| 2421 int delta = 0; | 2171 int delta = 0; |
| 2422 const byte* prev_pc = code->instruction_start(); | 2172 const byte* prev_pc = code->instruction_start(); |
| 2423 while (!it.done()) { | 2173 while (!it.done()) { |
| 2424 if (it.rinfo()->rmode() == RelocInfo::COMMENT) { | 2174 if (it.rinfo()->rmode() == RelocInfo::COMMENT) { |
| 2425 delta += static_cast<int>(it.rinfo()->pc() - prev_pc); | 2175 delta += static_cast<int>(it.rinfo()->pc() - prev_pc); |
| 2426 CollectCommentStatistics(isolate, &it); | 2176 CollectCommentStatistics(isolate, &it); |
| 2427 prev_pc = it.rinfo()->pc(); | 2177 prev_pc = it.rinfo()->pc(); |
| 2428 } | 2178 } |
| 2429 it.next(); | 2179 it.next(); |
| 2430 } | 2180 } |
| 2431 | 2181 |
| 2432 ASSERT(code->instruction_start() <= prev_pc && | 2182 ASSERT(code->instruction_start() <= prev_pc && |
| 2433 prev_pc <= code->instruction_end()); | 2183 prev_pc <= code->instruction_end()); |
| 2434 delta += static_cast<int>(code->instruction_end() - prev_pc); | 2184 delta += static_cast<int>(code->instruction_end() - prev_pc); |
| 2435 EnterComment(isolate, "NoComment", delta); | 2185 EnterComment(isolate, "NoComment", delta); |
| 2436 } | 2186 } |
| 2437 } | 2187 } |
| 2438 } | 2188 } |
| 2439 | 2189 |
| 2440 | 2190 |
| 2441 void OldSpace::ReportStatistics() { | 2191 void PagedSpace::ReportStatistics() { |
| 2442 int pct = static_cast<int>(Available() * 100 / Capacity()); | 2192 int pct = static_cast<int>(Available() * 100 / Capacity()); |
| 2443 PrintF(" capacity: %" V8_PTR_PREFIX "d" | 2193 PrintF(" capacity: %" V8_PTR_PREFIX "d" |
| 2444 ", waste: %" V8_PTR_PREFIX "d" | 2194 ", waste: %" V8_PTR_PREFIX "d" |
| 2445 ", available: %" V8_PTR_PREFIX "d, %%%d\n", | 2195 ", available: %" V8_PTR_PREFIX "d, %%%d\n", |
| 2446 Capacity(), Waste(), Available(), pct); | 2196 Capacity(), Waste(), Available(), pct); |
| 2447 | 2197 |
| 2198 if (was_swept_conservatively_) return; |
| 2448 ClearHistograms(); | 2199 ClearHistograms(); |
| 2449 HeapObjectIterator obj_it(this); | 2200 HeapObjectIterator obj_it(this); |
| 2450 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) | 2201 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) |
| 2451 CollectHistogramInfo(obj); | 2202 CollectHistogramInfo(obj); |
| 2452 ReportHistogram(true); | 2203 ReportHistogram(true); |
| 2453 } | 2204 } |
| 2454 #endif | 2205 #endif |
| 2455 | 2206 |
| 2456 // ----------------------------------------------------------------------------- | 2207 // ----------------------------------------------------------------------------- |
| 2457 // FixedSpace implementation | 2208 // FixedSpace implementation |
| 2458 | 2209 |
| 2459 void FixedSpace::PrepareForMarkCompact(bool will_compact) { | 2210 void FixedSpace::PrepareForMarkCompact() { |
| 2460 // Call prepare of the super class. | 2211 // Call prepare of the super class. |
| 2461 PagedSpace::PrepareForMarkCompact(will_compact); | 2212 PagedSpace::PrepareForMarkCompact(); |
| 2462 | 2213 |
| 2463 if (will_compact) { | 2214 // During a non-compacting collection, everything below the linear |
| 2464 // Reset relocation info. | 2215 // allocation pointer except wasted top-of-page blocks is considered |
| 2465 MCResetRelocationInfo(); | 2216 // allocated and we will rediscover available bytes during the |
| 2466 | 2217 // collection. |
| 2467 // During a compacting collection, everything in the space is considered | 2218 accounting_stats_.AllocateBytes(free_list_.available()); |
| 2468 // 'available' (set by the call to MCResetRelocationInfo) and we will | |
| 2469 // rediscover live and wasted bytes during the collection. | |
| 2470 ASSERT(Available() == Capacity()); | |
| 2471 } else { | |
| 2472 // During a non-compacting collection, everything below the linear | |
| 2473 // allocation pointer except wasted top-of-page blocks is considered | |
| 2474 // allocated and we will rediscover available bytes during the | |
| 2475 // collection. | |
| 2476 accounting_stats_.AllocateBytes(free_list_.available()); | |
| 2477 } | |
| 2478 | 2219 |
| 2479 // Clear the free list before a full GC---it will be rebuilt afterward. | 2220 // Clear the free list before a full GC---it will be rebuilt afterward. |
| 2480 free_list_.Reset(); | 2221 free_list_.Reset(); |
| 2481 } | 2222 } |
| 2482 | 2223 |
| 2483 | 2224 |
| 2484 void FixedSpace::MCCommitRelocationInfo() { | |
| 2485 // Update fast allocation info. | |
| 2486 allocation_info_.top = mc_forwarding_info_.top; | |
| 2487 allocation_info_.limit = mc_forwarding_info_.limit; | |
| 2488 ASSERT(allocation_info_.VerifyPagedAllocation()); | |
| 2489 | |
| 2490 // The space is compacted and we haven't yet wasted any space. | |
| 2491 ASSERT(Waste() == 0); | |
| 2492 | |
| 2493 // Update allocation_top of each page in use and compute waste. | |
| 2494 int computed_size = 0; | |
| 2495 PageIterator it(this, PageIterator::PAGES_USED_BY_MC); | |
| 2496 while (it.has_next()) { | |
| 2497 Page* page = it.next(); | |
| 2498 Address page_top = page->AllocationTop(); | |
| 2499 computed_size += static_cast<int>(page_top - page->ObjectAreaStart()); | |
| 2500 if (it.has_next()) { | |
| 2501 accounting_stats_.WasteBytes( | |
| 2502 static_cast<int>(page->ObjectAreaEnd() - page_top)); | |
| 2503 page->SetAllocationWatermark(page_top); | |
| 2504 } | |
| 2505 } | |
| 2506 | |
| 2507 // Make sure the computed size - based on the used portion of the | |
| 2508 // pages in use - matches the size we adjust during allocation. | |
| 2509 ASSERT(computed_size == Size()); | |
| 2510 } | |
| 2511 | |
| 2512 | |
| 2513 // Slow case for normal allocation. Try in order: (1) allocate in the next | |
| 2514 // page in the space, (2) allocate off the space's free list, (3) expand the | |
| 2515 // space, (4) fail. | |
| 2516 HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) { | |
| 2517 ASSERT_EQ(object_size_in_bytes_, size_in_bytes); | |
| 2518 // Linear allocation in this space has failed. If there is another page | |
| 2519 // in the space, move to that page and allocate there. This allocation | |
| 2520 // should succeed. | |
| 2521 Page* current_page = TopPageOf(allocation_info_); | |
| 2522 if (current_page->next_page()->is_valid()) { | |
| 2523 return AllocateInNextPage(current_page, size_in_bytes); | |
| 2524 } | |
| 2525 | |
| 2526 // There is no next page in this space. Try free list allocation unless | |
| 2527 // that is currently forbidden. The fixed space free list implicitly assumes | |
| 2528 // that all free blocks are of the fixed size. | |
| 2529 if (!heap()->linear_allocation()) { | |
| 2530 Object* result; | |
| 2531 MaybeObject* maybe = free_list_.Allocate(); | |
| 2532 if (maybe->ToObject(&result)) { | |
| 2533 accounting_stats_.AllocateBytes(size_in_bytes); | |
| 2534 HeapObject* obj = HeapObject::cast(result); | |
| 2535 Page* p = Page::FromAddress(obj->address()); | |
| 2536 | |
| 2537 if (obj->address() >= p->AllocationWatermark()) { | |
| 2538 // There should be no hole between the allocation watermark | |
| 2539 // and allocated object address. | |
| 2540 // Memory above the allocation watermark was not swept and | |
| 2541 // might contain garbage pointers to new space. | |
| 2542 ASSERT(obj->address() == p->AllocationWatermark()); | |
| 2543 p->SetAllocationWatermark(obj->address() + size_in_bytes); | |
| 2544 } | |
| 2545 | |
| 2546 return obj; | |
| 2547 } | |
| 2548 } | |
| 2549 | |
| 2550 // Free list allocation failed and there is no next page. Fail if we have | |
| 2551 // hit the old generation size limit that should cause a garbage | |
| 2552 // collection. | |
| 2553 if (!heap()->always_allocate() && | |
| 2554 heap()->OldGenerationAllocationLimitReached()) { | |
| 2555 return NULL; | |
| 2556 } | |
| 2557 | |
| 2558 // Try to expand the space and allocate in the new next page. | |
| 2559 ASSERT(!current_page->next_page()->is_valid()); | |
| 2560 if (Expand(current_page)) { | |
| 2561 return AllocateInNextPage(current_page, size_in_bytes); | |
| 2562 } | |
| 2563 | |
| 2564 // Finally, fail. | |
| 2565 return NULL; | |
| 2566 } | |
| 2567 | |
| 2568 | |
| 2569 // Move to the next page (there is assumed to be one) and allocate there. | |
| 2570 // The top of page block is always wasted, because it is too small to hold a | |
| 2571 // map. | |
| 2572 HeapObject* FixedSpace::AllocateInNextPage(Page* current_page, | |
| 2573 int size_in_bytes) { | |
| 2574 ASSERT(current_page->next_page()->is_valid()); | |
| 2575 ASSERT(allocation_info_.top == PageAllocationLimit(current_page)); | |
| 2576 ASSERT_EQ(object_size_in_bytes_, size_in_bytes); | |
| 2577 Page* next_page = current_page->next_page(); | |
| 2578 next_page->ClearGCFields(); | |
| 2579 current_page->SetAllocationWatermark(allocation_info_.top); | |
| 2580 accounting_stats_.WasteBytes(page_extra_); | |
| 2581 SetAllocationInfo(&allocation_info_, next_page); | |
| 2582 return AllocateLinearly(&allocation_info_, size_in_bytes); | |
| 2583 } | |
| 2584 | |
| 2585 | |
| 2586 void FixedSpace::DeallocateBlock(Address start, | |
| 2587 int size_in_bytes, | |
| 2588 bool add_to_freelist) { | |
| 2589 // Free-list elements in fixed space are assumed to have a fixed size. | |
| 2590 // We break the free block into chunks and add them to the free list | |
| 2591 // individually. | |
| 2592 int size = object_size_in_bytes(); | |
| 2593 ASSERT(size_in_bytes % size == 0); | |
| 2594 Address end = start + size_in_bytes; | |
| 2595 for (Address a = start; a < end; a += size) { | |
| 2596 Free(a, add_to_freelist); | |
| 2597 } | |
| 2598 } | |
| 2599 | |
| 2600 | |
| 2601 #ifdef DEBUG | |
| 2602 void FixedSpace::ReportStatistics() { | |
| 2603 int pct = static_cast<int>(Available() * 100 / Capacity()); | |
| 2604 PrintF(" capacity: %" V8_PTR_PREFIX "d" | |
| 2605 ", waste: %" V8_PTR_PREFIX "d" | |
| 2606 ", available: %" V8_PTR_PREFIX "d, %%%d\n", | |
| 2607 Capacity(), Waste(), Available(), pct); | |
| 2608 | |
| 2609 ClearHistograms(); | |
| 2610 HeapObjectIterator obj_it(this); | |
| 2611 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) | |
| 2612 CollectHistogramInfo(obj); | |
| 2613 ReportHistogram(false); | |
| 2614 } | |
| 2615 #endif | |
| 2616 | |
| 2617 | |
| 2618 // ----------------------------------------------------------------------------- | 2225 // ----------------------------------------------------------------------------- |
| 2619 // MapSpace implementation | 2226 // MapSpace implementation |
| 2620 | 2227 |
| 2621 void MapSpace::PrepareForMarkCompact(bool will_compact) { | |
| 2622 // Call prepare of the super class. | |
| 2623 FixedSpace::PrepareForMarkCompact(will_compact); | |
| 2624 | |
| 2625 if (will_compact) { | |
| 2626 // Initialize map index entry. | |
| 2627 int page_count = 0; | |
| 2628 PageIterator it(this, PageIterator::ALL_PAGES); | |
| 2629 while (it.has_next()) { | |
| 2630 ASSERT_MAP_PAGE_INDEX(page_count); | |
| 2631 | |
| 2632 Page* p = it.next(); | |
| 2633 ASSERT(p->mc_page_index == page_count); | |
| 2634 | |
| 2635 page_addresses_[page_count++] = p->address(); | |
| 2636 } | |
| 2637 } | |
| 2638 } | |
| 2639 | |
| 2640 | |
| 2641 #ifdef DEBUG | 2228 #ifdef DEBUG |
| 2642 void MapSpace::VerifyObject(HeapObject* object) { | 2229 void MapSpace::VerifyObject(HeapObject* object) { |
| 2643 // The object should be a map or a free-list node. | 2230 // The object should be a map or a free-list node. |
| 2644 ASSERT(object->IsMap() || object->IsByteArray()); | 2231 ASSERT(object->IsMap() || object->IsFreeSpace()); |
| 2645 } | 2232 } |
| 2646 #endif | 2233 #endif |
| 2647 | 2234 |
| 2648 | 2235 |
| 2649 // ----------------------------------------------------------------------------- | 2236 // ----------------------------------------------------------------------------- |
| 2650 // GlobalPropertyCellSpace implementation | 2237 // GlobalPropertyCellSpace implementation |
| 2651 | 2238 |
| 2652 #ifdef DEBUG | 2239 #ifdef DEBUG |
| 2653 void CellSpace::VerifyObject(HeapObject* object) { | 2240 void CellSpace::VerifyObject(HeapObject* object) { |
| 2654 // The object should be a global object property cell or a free-list node. | 2241 // The object should be a global object property cell or a free-list node. |
| 2655 ASSERT(object->IsJSGlobalPropertyCell() || | 2242 ASSERT(object->IsJSGlobalPropertyCell() || |
| 2656 object->map() == heap()->two_pointer_filler_map()); | 2243 object->map() == heap()->two_pointer_filler_map()); |
| 2657 } | 2244 } |
| 2658 #endif | 2245 #endif |
| 2659 | 2246 |
| 2660 | 2247 |
| 2661 // ----------------------------------------------------------------------------- | 2248 // ----------------------------------------------------------------------------- |
| 2662 // LargeObjectIterator | 2249 // LargeObjectIterator |
| 2663 | 2250 |
| 2664 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) { | 2251 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) { |
| 2665 current_ = space->first_chunk_; | 2252 current_ = space->first_page_; |
| 2666 size_func_ = NULL; | 2253 size_func_ = NULL; |
| 2667 } | 2254 } |
| 2668 | 2255 |
| 2669 | 2256 |
| 2670 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space, | 2257 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space, |
| 2671 HeapObjectCallback size_func) { | 2258 HeapObjectCallback size_func) { |
| 2672 current_ = space->first_chunk_; | 2259 current_ = space->first_page_; |
| 2673 size_func_ = size_func; | 2260 size_func_ = size_func; |
| 2674 } | 2261 } |
| 2675 | 2262 |
| 2676 | 2263 |
| 2677 HeapObject* LargeObjectIterator::next() { | 2264 HeapObject* LargeObjectIterator::Next() { |
| 2678 if (current_ == NULL) return NULL; | 2265 if (current_ == NULL) return NULL; |
| 2679 | 2266 |
| 2680 HeapObject* object = current_->GetObject(); | 2267 HeapObject* object = current_->GetObject(); |
| 2681 current_ = current_->next(); | 2268 current_ = current_->next_page(); |
| 2682 return object; | 2269 return object; |
| 2683 } | 2270 } |
| 2684 | 2271 |
| 2685 | 2272 |
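Editor note: with LargeObjectChunk folded into LargePage, iterating large object space now looks like the other spaces, just through the renamed Next(). A small usage sketch, assuming the usual Heap::lo_space() accessor:

    // Walk every large object (one object per LargePage), mirroring the
    // HeapObjectIterator loops used earlier in this file.
    LargeObjectIterator it(heap()->lo_space());
    for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
      // ... inspect obj, e.g. obj->Size() ...
    }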
| 2686 // ----------------------------------------------------------------------------- | 2273 // ----------------------------------------------------------------------------- |
| 2687 // LargeObjectChunk | |
| 2688 | |
| 2689 LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes, | |
| 2690 Executability executable) { | |
| 2691 size_t requested = ChunkSizeFor(size_in_bytes); | |
| 2692 size_t size; | |
| 2693 size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0; | |
| 2694 Isolate* isolate = Isolate::Current(); | |
| 2695 void* mem = isolate->memory_allocator()->AllocateRawMemory( | |
| 2696 requested + guard_size, &size, executable); | |
| 2697 if (mem == NULL) return NULL; | |
| 2698 | |
| 2699 // The start of the chunk may be overlayed with a page so we have to | |
| 2700 // make sure that the page flags fit in the size field. | |
| 2701 ASSERT((size & Page::kPageFlagMask) == 0); | |
| 2702 | |
| 2703 LOG(isolate, NewEvent("LargeObjectChunk", mem, size)); | |
| 2704 if (size < requested + guard_size) { | |
| 2705 isolate->memory_allocator()->FreeRawMemory( | |
| 2706 mem, size, executable); | |
| 2707 LOG(isolate, DeleteEvent("LargeObjectChunk", mem)); | |
| 2708 return NULL; | |
| 2709 } | |
| 2710 | |
| 2711 if (guard_size != 0) { | |
| 2712 OS::Guard(mem, guard_size); | |
| 2713 size -= guard_size; | |
| 2714 mem = static_cast<Address>(mem) + guard_size; | |
| 2715 } | |
| 2716 | |
| 2717 ObjectSpace space = (executable == EXECUTABLE) | |
| 2718 ? kObjectSpaceCodeSpace | |
| 2719 : kObjectSpaceLoSpace; | |
| 2720 isolate->memory_allocator()->PerformAllocationCallback( | |
| 2721 space, kAllocationActionAllocate, size); | |
| 2722 | |
| 2723 LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem); | |
| 2724 chunk->size_ = size; | |
| 2725 chunk->GetPage()->heap_ = isolate->heap(); | |
| 2726 return chunk; | |
| 2727 } | |
| 2728 | |
| 2729 | |
| 2730 void LargeObjectChunk::Free(Executability executable) { | |
| 2731 size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0; | |
| 2732 ObjectSpace space = | |
| 2733 (executable == EXECUTABLE) ? kObjectSpaceCodeSpace : kObjectSpaceLoSpace; | |
| 2734 // Do not access instance fields after FreeRawMemory! | |
| 2735 Address my_address = address(); | |
| 2736 size_t my_size = size(); | |
| 2737 Isolate* isolate = GetPage()->heap_->isolate(); | |
| 2738 MemoryAllocator* a = isolate->memory_allocator(); | |
| 2739 a->FreeRawMemory(my_address - guard_size, my_size + guard_size, executable); | |
| 2740 a->PerformAllocationCallback(space, kAllocationActionFree, my_size); | |
| 2741 LOG(isolate, DeleteEvent("LargeObjectChunk", my_address)); | |
| 2742 } | |
| 2743 | |
| 2744 | |
| 2745 int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) { | |
| 2746 int os_alignment = static_cast<int>(OS::AllocateAlignment()); | |
| 2747 if (os_alignment < Page::kPageSize) { | |
| 2748 size_in_bytes += (Page::kPageSize - os_alignment); | |
| 2749 } | |
| 2750 return size_in_bytes + Page::kObjectStartOffset; | |
| 2751 } | |
| 2752 | |
| 2753 // ----------------------------------------------------------------------------- | |
| 2754 // LargeObjectSpace | 2274 // LargeObjectSpace |
| 2755 | 2275 |
| 2756 LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id) | 2276 LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id) |
| 2757 : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis | 2277 : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis |
| 2758 first_chunk_(NULL), | 2278 first_page_(NULL), |
| 2759 size_(0), | 2279 size_(0), |
| 2760 page_count_(0), | 2280 page_count_(0), |
| 2761 objects_size_(0) {} | 2281 objects_size_(0) {} |
| 2762 | 2282 |
| 2763 | 2283 |
| 2764 bool LargeObjectSpace::Setup() { | 2284 bool LargeObjectSpace::Setup() { |
| 2765 first_chunk_ = NULL; | 2285 first_page_ = NULL; |
| 2766 size_ = 0; | 2286 size_ = 0; |
| 2767 page_count_ = 0; | 2287 page_count_ = 0; |
| 2768 objects_size_ = 0; | 2288 objects_size_ = 0; |
| 2769 return true; | 2289 return true; |
| 2770 } | 2290 } |
| 2771 | 2291 |
| 2772 | 2292 |
| 2773 void LargeObjectSpace::TearDown() { | 2293 void LargeObjectSpace::TearDown() { |
| 2774 while (first_chunk_ != NULL) { | 2294 while (first_page_ != NULL) { |
| 2775 LargeObjectChunk* chunk = first_chunk_; | 2295 LargePage* page = first_page_; |
| 2776 first_chunk_ = first_chunk_->next(); | 2296 first_page_ = first_page_->next_page(); |
| 2777 chunk->Free(chunk->GetPage()->PageExecutability()); | 2297 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address())); |
| 2298 |
| 2299 ObjectSpace space = static_cast<ObjectSpace>(1 << identity()); |
| 2300 heap()->isolate()->memory_allocator()->PerformAllocationCallback( |
| 2301 space, kAllocationActionFree, page->size()); |
| 2302 heap()->isolate()->memory_allocator()->Free(page); |
| 2778 } | 2303 } |
| 2779 Setup(); | 2304 Setup(); |
| 2780 } | 2305 } |
| 2781 | 2306 |
| 2782 | 2307 |
| 2783 MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size, | 2308 MaybeObject* LargeObjectSpace::AllocateRaw(int object_size, |
| 2784 int object_size, | 2309 Executability executable) { |
| 2785 Executability executable) { | |
| 2786 ASSERT(0 < object_size && object_size <= requested_size); | |
| 2787 | |
| 2788 // Check if we want to force a GC before growing the old space further. | 2310 // Check if we want to force a GC before growing the old space further. |
| 2789 // If so, fail the allocation. | 2311 // If so, fail the allocation. |
| 2790 if (!heap()->always_allocate() && | 2312 if (!heap()->always_allocate() && |
| 2791 heap()->OldGenerationAllocationLimitReached()) { | 2313 heap()->OldGenerationAllocationLimitReached()) { |
| 2792 return Failure::RetryAfterGC(identity()); | 2314 return Failure::RetryAfterGC(identity()); |
| 2793 } | 2315 } |
| 2794 | 2316 |
| 2795 LargeObjectChunk* chunk = LargeObjectChunk::New(requested_size, executable); | 2317 LargePage* page = heap()->isolate()->memory_allocator()-> |
| 2796 if (chunk == NULL) { | 2318 AllocateLargePage(object_size, executable, this); |
| 2797 return Failure::RetryAfterGC(identity()); | 2319 if (page == NULL) return Failure::RetryAfterGC(identity()); |
| 2798 } | 2320 ASSERT(page->body_size() >= object_size); |
| 2799 | 2321 |
| 2800 size_ += static_cast<int>(chunk->size()); | 2322 size_ += static_cast<int>(page->size()); |
| 2801 objects_size_ += requested_size; | 2323 objects_size_ += object_size; |
| 2802 page_count_++; | 2324 page_count_++; |
| 2803 chunk->set_next(first_chunk_); | 2325 page->set_next_page(first_page_); |
| 2804 first_chunk_ = chunk; | 2326 first_page_ = page; |
| 2805 | 2327 |
| 2806 // Initialize page header. | 2328 heap()->incremental_marking()->OldSpaceStep(object_size); |
| 2807 Page* page = chunk->GetPage(); | 2329 return page->GetObject(); |
| 2808 Address object_address = page->ObjectAreaStart(); | |
| 2809 | |
| 2810 // Clear the low order bit of the second word in the page to flag it as a | |
| 2811 // large object page. If the chunk_size happened to be written there, its | |
| 2812 // low order bit should already be clear. | |
| 2813 page->SetIsLargeObjectPage(true); | |
| 2814 page->SetPageExecutability(executable); | |
| 2815 page->SetRegionMarks(Page::kAllRegionsCleanMarks); | |
| 2816 return HeapObject::FromAddress(object_address); | |
| 2817 } | |
| 2818 | |
| 2819 | |
| 2820 MaybeObject* LargeObjectSpace::AllocateRawCode(int size_in_bytes) { | |
| 2821 ASSERT(0 < size_in_bytes); | |
| 2822 return AllocateRawInternal(size_in_bytes, | |
| 2823 size_in_bytes, | |
| 2824 EXECUTABLE); | |
| 2825 } | |
| 2826 | |
| 2827 | |
| 2828 MaybeObject* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) { | |
| 2829 ASSERT(0 < size_in_bytes); | |
| 2830 return AllocateRawInternal(size_in_bytes, | |
| 2831 size_in_bytes, | |
| 2832 NOT_EXECUTABLE); | |
| 2833 } | |
| 2834 | |
| 2835 | |
| 2836 MaybeObject* LargeObjectSpace::AllocateRaw(int size_in_bytes) { | |
| 2837 ASSERT(0 < size_in_bytes); | |
| 2838 return AllocateRawInternal(size_in_bytes, | |
| 2839 size_in_bytes, | |
| 2840 NOT_EXECUTABLE); | |
| 2841 } | 2330 } |
| 2842 | 2331 |
| 2843 | 2332 |
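Editor note: AllocateRaw() now takes the executability flag directly, replacing the removed AllocateRawCode/AllocateRawFixedArray wrappers. A usage fragment showing the retry-after-GC handling implied by the Failure return (surrounding declarations are assumed):

    // Usage fragment: allocate a large object and propagate the
    // retry-after-GC failure to the caller.
    Object* result;
    MaybeObject* maybe =
        heap->lo_space()->AllocateRaw(object_size, NOT_EXECUTABLE);
    if (!maybe->ToObject(&result)) {
      // Failure::RetryAfterGC: the caller performs a GC (or expands the
      // heap) and calls AllocateRaw again.
      return maybe;
    }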
| 2844 // GC support | 2333 // GC support |
| 2845 MaybeObject* LargeObjectSpace::FindObject(Address a) { | 2334 MaybeObject* LargeObjectSpace::FindObject(Address a) { |
| 2846 for (LargeObjectChunk* chunk = first_chunk_; | 2335 for (LargePage* page = first_page_; |
| 2847 chunk != NULL; | 2336 page != NULL; |
| 2848 chunk = chunk->next()) { | 2337 page = page->next_page()) { |
| 2849 Address chunk_address = chunk->address(); | 2338 Address page_address = page->address(); |
| 2850 if (chunk_address <= a && a < chunk_address + chunk->size()) { | 2339 if (page_address <= a && a < page_address + page->size()) { |
| 2851 return chunk->GetObject(); | 2340 return page->GetObject(); |
| 2852 } | 2341 } |
| 2853 } | 2342 } |
| 2854 return Failure::Exception(); | 2343 return Failure::Exception(); |
| 2855 } | 2344 } |
| 2856 | 2345 |
| 2857 | 2346 |
| 2858 LargeObjectChunk* LargeObjectSpace::FindChunkContainingPc(Address pc) { | 2347 LargePage* LargeObjectSpace::FindPageContainingPc(Address pc) { |
| 2859 // TODO(853): Change this implementation to only find executable | 2348 // TODO(853): Change this implementation to only find executable |
| 2860 // chunks and use some kind of hash-based approach to speed it up. | 2349 // chunks and use some kind of hash-based approach to speed it up. |
| 2861 for (LargeObjectChunk* chunk = first_chunk_; | 2350 for (LargePage* chunk = first_page_; |
| 2862 chunk != NULL; | 2351 chunk != NULL; |
| 2863 chunk = chunk->next()) { | 2352 chunk = chunk->next_page()) { |
| 2864 Address chunk_address = chunk->address(); | 2353 Address chunk_address = chunk->address(); |
| 2865 if (chunk_address <= pc && pc < chunk_address + chunk->size()) { | 2354 if (chunk_address <= pc && pc < chunk_address + chunk->size()) { |
| 2866 return chunk; | 2355 return chunk; |
| 2867 } | 2356 } |
| 2868 } | 2357 } |
| 2869 return NULL; | 2358 return NULL; |
| 2870 } | 2359 } |
| 2871 | 2360 |
| 2872 | 2361 |
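FindObject and FindPageContainingPc above share one strategy: walk the page list and return the first page whose [address, address + size) range contains the query address (the TODO notes this linear scan could eventually become hash-based). Below is a minimal standalone model of that containment walk; PageNode is an illustrative stand-in, not V8's LargePage.

#include <cstdint>
#include <cstdio>

struct PageNode {
  uintptr_t start;  // page base address
  size_t size;      // page size in bytes
  PageNode* next;
};

// Returns the page whose address range contains 'a', or nullptr.
// Mirrors the loop in LargeObjectSpace::FindObject/FindPageContainingPc.
PageNode* FindPageContaining(PageNode* first, uintptr_t a) {
  for (PageNode* p = first; p != nullptr; p = p->next) {
    if (p->start <= a && a < p->start + p->size) return p;
  }
  return nullptr;
}

int main() {
  PageNode b{0x200000, 0x100000, nullptr};
  PageNode a{0x100000, 0x80000, &b};
  printf("hit=%p miss=%p\n",
         static_cast<void*>(FindPageContaining(&a, 0x250000)),
         static_cast<void*>(FindPageContaining(&a, 0x190000)));
  return 0;
}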
| 2873 void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) { | |
| 2874 LargeObjectIterator it(this); | |
| 2875 for (HeapObject* object = it.next(); object != NULL; object = it.next()) { | |
| 2876 // We only have code, sequential strings, or fixed arrays in large | |
| 2877 // object space, and only fixed arrays can possibly contain pointers to | |
| 2878 // the young generation. | |
| 2879 if (object->IsFixedArray()) { | |
| 2880 Page* page = Page::FromAddress(object->address()); | |
| 2881 uint32_t marks = page->GetRegionMarks(); | |
| 2882 uint32_t newmarks = Page::kAllRegionsCleanMarks; | |
| 2883 | |
| 2884 if (marks != Page::kAllRegionsCleanMarks) { | |
| 2885 // For a large page a single dirty mark corresponds to several | |
| 2886 // regions (modulo 32). So we treat a large page as a sequence of | |
| 2887 // normal pages of size Page::kPageSize having the same dirty marks | |
| 2888 // and subsequently iterate dirty regions on each of these pages. | |
| 2889 Address start = object->address(); | |
| 2890 Address end = page->ObjectAreaEnd(); | |
| 2891 Address object_end = start + object->Size(); | |
| 2892 | |
| 2893 // Iterate regions of the first normal page covering object. | |
| 2894 uint32_t first_region_number = page->GetRegionNumberForAddress(start); | |
| 2895 newmarks |= | |
| 2896 heap()->IterateDirtyRegions(marks >> first_region_number, | |
| 2897 start, | |
| 2898 end, | |
| 2899 &Heap::IteratePointersInDirtyRegion, | |
| 2900 copy_object) << first_region_number; | |
| 2901 | |
| 2902 start = end; | |
| 2903 end = start + Page::kPageSize; | |
| 2904 while (end <= object_end) { | |
| 2905 // Iterate next 32 regions. | |
| 2906 newmarks |= | |
| 2907 heap()->IterateDirtyRegions(marks, | |
| 2908 start, | |
| 2909 end, | |
| 2910 &Heap::IteratePointersInDirtyRegion, | |
| 2911 copy_object); | |
| 2912 start = end; | |
| 2913 end = start + Page::kPageSize; | |
| 2914 } | |
| 2915 | |
| 2916 if (start != object_end) { | |
| 2917 // Iterate the last piece of an object which is less than | |
| 2918 // Page::kPageSize. | |
| 2919 newmarks |= | |
| 2920 heap()->IterateDirtyRegions(marks, | |
| 2921 start, | |
| 2922 object_end, | |
| 2923 &Heap::IteratePointersInDirtyRegion, | |
| 2924 copy_object); | |
| 2925 } | |
| 2926 | |
| 2927 page->SetRegionMarks(newmarks); | |
| 2928 } | |
| 2929 } | |
| 2930 } | |
| 2931 } | |
| 2932 | |
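The removed IterateDirtyRegions relied on region-mark arithmetic: a page's 32-bit mark word carries one dirty bit per region, and a large page is treated as a run of Page::kPageSize windows that reuse those 32 bits. A rough standalone sketch of just that bit arithmetic follows; the region size and page size below are illustrative assumptions, not the real Page constants.

#include <cstdint>
#include <cstdio>

// Illustrative constants: a page split into 32 regions of 8 KB each.
// The real Page::kPageSize / region size may differ.
const uintptr_t kPageSize = 256 * 1024;
const int kRegions = 32;
const uintptr_t kRegionSize = kPageSize / kRegions;
const uint32_t kAllRegionsClean = 0;

// Region index of an address, relative to the page starting at page_start.
int RegionNumber(uintptr_t page_start, uintptr_t addr) {
  return static_cast<int>(((addr - page_start) % kPageSize) / kRegionSize);
}

int main() {
  uintptr_t page_start = 0x100000;
  uint32_t marks = kAllRegionsClean;

  // A write at this address dirties exactly one region bit.
  uintptr_t slot = page_start + 5 * kRegionSize + 128;
  marks |= 1u << RegionNumber(page_start, slot);

  // To scan starting mid-page (as the removed code did for the first
  // window of a large object), shift the marks down to the first region.
  int first = RegionNumber(page_start, slot);
  printf("marks=%08x first_region=%d shifted=%08x\n",
         marks, first, marks >> first);
  return 0;
}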
| 2933 | |
| 2934 void LargeObjectSpace::FreeUnmarkedObjects() { | 2362 void LargeObjectSpace::FreeUnmarkedObjects() { |
| 2935 LargeObjectChunk* previous = NULL; | 2363 LargePage* previous = NULL; |
| 2936 LargeObjectChunk* current = first_chunk_; | 2364 LargePage* current = first_page_; |
| 2937 while (current != NULL) { | 2365 while (current != NULL) { |
| 2938 HeapObject* object = current->GetObject(); | 2366 HeapObject* object = current->GetObject(); |
| 2939 if (object->IsMarked()) { | 2367 // Can this large page contain pointers to non-trivial objects? No other |
| 2940 object->ClearMark(); | 2368 // pointer object is this big. |
| 2941 heap()->mark_compact_collector()->tracer()->decrement_marked_count(); | 2369 bool is_pointer_object = object->IsFixedArray(); |
| 2370 MarkBit mark_bit = Marking::MarkBitFrom(object); |
| 2371 if (mark_bit.Get()) { |
| 2372 mark_bit.Clear(); |
| 2373 MemoryChunk::IncrementLiveBytes(object->address(), -object->Size()); |
| 2942 previous = current; | 2374 previous = current; |
| 2943 current = current->next(); | 2375 current = current->next_page(); |
| 2944 } else { | 2376 } else { |
| 2377 LargePage* page = current; |
| 2945 // Cut the chunk out from the chunk list. | 2378 // Cut the chunk out from the chunk list. |
| 2946 LargeObjectChunk* current_chunk = current; | 2379 current = current->next_page(); |
| 2947 current = current->next(); | |
| 2948 if (previous == NULL) { | 2380 if (previous == NULL) { |
| 2949 first_chunk_ = current; | 2381 first_page_ = current; |
| 2950 } else { | 2382 } else { |
| 2951 previous->set_next(current); | 2383 previous->set_next_page(current); |
| 2952 } | 2384 } |
| 2953 | 2385 |
| 2954 // Free the chunk. | 2386 // Free the chunk. |
| 2955 heap()->mark_compact_collector()->ReportDeleteIfNeeded( | 2387 heap()->mark_compact_collector()->ReportDeleteIfNeeded( |
| 2956 object, heap()->isolate()); | 2388 object, heap()->isolate()); |
| 2957 LiveObjectList::ProcessNonLive(object); | 2389 size_ -= static_cast<int>(page->size()); |
| 2958 | |
| 2959 size_ -= static_cast<int>(current_chunk->size()); | |
| 2960 objects_size_ -= object->Size(); | 2390 objects_size_ -= object->Size(); |
| 2961 page_count_--; | 2391 page_count_--; |
| 2962 current_chunk->Free(current_chunk->GetPage()->PageExecutability()); | 2392 |
| 2393 if (is_pointer_object) { |
| 2394 heap()->QueueMemoryChunkForFree(page); |
| 2395 } else { |
| 2396 heap()->isolate()->memory_allocator()->Free(page); |
| 2397 } |
| 2963 } | 2398 } |
| 2964 } | 2399 } |
| 2400 heap()->FreeQueuedChunks(); |
| 2965 } | 2401 } |
| 2966 | 2402 |
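FreeUnmarkedObjects above is a standard sweep over a singly linked list: marked pages survive (with their mark bit cleared), unmarked pages are unlinked through the previous pointer and released. Here is a minimal standalone model of that unlink pattern, with a plain marked flag standing in for the real mark bit and delete standing in for the two release paths (queued free for pointer pages, immediate free otherwise).

#include <cstdio>

struct Node {
  bool marked;
  Node* next;
};

// Keeps marked nodes (clearing their mark) and frees unmarked ones,
// mirroring the previous/current walk in FreeUnmarkedObjects.
Node* SweepUnmarked(Node* first) {
  Node* previous = nullptr;
  Node* current = first;
  while (current != nullptr) {
    if (current->marked) {
      current->marked = false;  // survives this cycle; clear for the next
      previous = current;
      current = current->next;
    } else {
      Node* dead = current;
      current = current->next;
      if (previous == nullptr) {
        first = current;        // unlink from the head of the list
      } else {
        previous->next = current;
      }
      delete dead;
    }
  }
  return first;
}

int main() {
  // Build a three-node list: marked, unmarked, marked.
  Node* c = new Node{true, nullptr};
  Node* b = new Node{false, c};
  Node* a = new Node{true, b};
  Node* head = SweepUnmarked(a);
  int survivors = 0;
  for (Node* p = head; p != nullptr; p = p->next) survivors++;
  printf("surviving pages: %d\n", survivors);  // expect 2
  while (head != nullptr) { Node* next = head->next; delete head; head = next; }
  return 0;
}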
| 2967 | 2403 |
| 2968 bool LargeObjectSpace::Contains(HeapObject* object) { | 2404 bool LargeObjectSpace::Contains(HeapObject* object) { |
| 2969 Address address = object->address(); | 2405 Address address = object->address(); |
| 2970 if (heap()->new_space()->Contains(address)) { | 2406 MemoryChunk* chunk = MemoryChunk::FromAddress(address); |
| 2971 return false; | |
| 2972 } | |
| 2973 Page* page = Page::FromAddress(address); | |
| 2974 | 2407 |
| 2975 SLOW_ASSERT(!page->IsLargeObjectPage() | 2408 bool owned = (chunk->owner() == this); |
| 2976 || !FindObject(address)->IsFailure()); | |
| 2977 | 2409 |
| 2978 return page->IsLargeObjectPage(); | 2410 SLOW_ASSERT(!owned || !FindObject(address)->IsFailure()); |
| 2411 |
| 2412 return owned; |
| 2979 } | 2413 } |
| 2980 | 2414 |
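The new Contains above recovers the MemoryChunk for an address and compares its owner() against this space. Presumably FromAddress works by masking the address down to the chunk alignment so the header at the start of the aligned region can be read; the sketch below only illustrates that masking idea, with an assumed 1 MB alignment that may not match the real MemoryChunk constant.

#include <cstdint>
#include <cstdio>

// Illustrative alignment; the real MemoryChunk alignment may differ.
const uintptr_t kChunkAlignment = 1 << 20;  // assume 1 MB-aligned chunks
const uintptr_t kAlignmentMask = kChunkAlignment - 1;

// Stand-in for MemoryChunk::FromAddress: mask the low bits away to land
// on the chunk header at the start of the aligned region.
uintptr_t ChunkBaseFromAddress(uintptr_t addr) {
  return addr & ~kAlignmentMask;
}

int main() {
  uintptr_t chunk_base = 16 * kChunkAlignment;  // some aligned chunk start
  uintptr_t interior = chunk_base + 0x1234;     // address inside that chunk
  // Contains() then just compares the recovered chunk's owner() with 'this'.
  printf("recovered base == chunk base: %s\n",
         ChunkBaseFromAddress(interior) == chunk_base ? "yes" : "no");
  return 0;
}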
| 2981 | 2415 |
| 2982 #ifdef DEBUG | 2416 #ifdef DEBUG |
| 2983 // We do not assume that the large object iterator works, because it depends | 2417 // We do not assume that the large object iterator works, because it depends |
| 2984 // on the invariants we are checking during verification. | 2418 // on the invariants we are checking during verification. |
| 2985 void LargeObjectSpace::Verify() { | 2419 void LargeObjectSpace::Verify() { |
| 2986 for (LargeObjectChunk* chunk = first_chunk_; | 2420 for (LargePage* chunk = first_page_; |
| 2987 chunk != NULL; | 2421 chunk != NULL; |
| 2988 chunk = chunk->next()) { | 2422 chunk = chunk->next_page()) { |
| 2989 // Each chunk contains an object that starts at the large object page's | 2423 // Each chunk contains an object that starts at the large object page's |
| 2990 // object area start. | 2424 // object area start. |
| 2991 HeapObject* object = chunk->GetObject(); | 2425 HeapObject* object = chunk->GetObject(); |
| 2992 Page* page = Page::FromAddress(object->address()); | 2426 Page* page = Page::FromAddress(object->address()); |
| 2993 ASSERT(object->address() == page->ObjectAreaStart()); | 2427 ASSERT(object->address() == page->ObjectAreaStart()); |
| 2994 | 2428 |
| 2995 // The first word should be a map, and we expect all map pointers to be | 2429 // The first word should be a map, and we expect all map pointers to be |
| 2996 // in map space. | 2430 // in map space. |
| 2997 Map* map = object->map(); | 2431 Map* map = object->map(); |
| 2998 ASSERT(map->IsMap()); | 2432 ASSERT(map->IsMap()); |
| 2999 ASSERT(heap()->map_space()->Contains(map)); | 2433 ASSERT(heap()->map_space()->Contains(map)); |
| 3000 | 2434 |
| 3001 // We have only code, sequential strings, external strings | 2435 // We have only code, sequential strings, external strings |
| 3002 // (sequential strings that have been morphed into external | 2436 // (sequential strings that have been morphed into external |
| 3003 // strings), fixed arrays, and byte arrays in large object space. | 2437 // strings), fixed arrays, and byte arrays in large object space. |
| 3004 ASSERT(object->IsCode() || object->IsSeqString() || | 2438 ASSERT(object->IsCode() || object->IsSeqString() || |
| 3005 object->IsExternalString() || object->IsFixedArray() || | 2439 object->IsExternalString() || object->IsFixedArray() || |
| 3006 object->IsFixedDoubleArray() || object->IsByteArray()); | 2440 object->IsFixedDoubleArray() || object->IsByteArray()); |
| 3007 | 2441 |
| 3008 // The object itself should look OK. | 2442 // The object itself should look OK. |
| 3009 object->Verify(); | 2443 object->Verify(); |
| 3010 | 2444 |
| 3011 // Byte arrays and strings don't have interior pointers. | 2445 // Byte arrays and strings don't have interior pointers. |
| 3012 if (object->IsCode()) { | 2446 if (object->IsCode()) { |
| 3013 VerifyPointersVisitor code_visitor; | 2447 VerifyPointersVisitor code_visitor; |
| 3014 object->IterateBody(map->instance_type(), | 2448 object->IterateBody(map->instance_type(), |
| 3015 object->Size(), | 2449 object->Size(), |
| 3016 &code_visitor); | 2450 &code_visitor); |
| 3017 } else if (object->IsFixedArray()) { | 2451 } else if (object->IsFixedArray()) { |
| 3018 // We loop over fixed arrays ourselves, rather then using the visitor, | |
| 3019 // because the visitor doesn't support the start/offset iteration | |
| 3020 // needed for IsRegionDirty. | |
| 3021 FixedArray* array = FixedArray::cast(object); | 2452 FixedArray* array = FixedArray::cast(object); |
| 3022 for (int j = 0; j < array->length(); j++) { | 2453 for (int j = 0; j < array->length(); j++) { |
| 3023 Object* element = array->get(j); | 2454 Object* element = array->get(j); |
| 3024 if (element->IsHeapObject()) { | 2455 if (element->IsHeapObject()) { |
| 3025 HeapObject* element_object = HeapObject::cast(element); | 2456 HeapObject* element_object = HeapObject::cast(element); |
| 3026 ASSERT(heap()->Contains(element_object)); | 2457 ASSERT(heap()->Contains(element_object)); |
| 3027 ASSERT(element_object->map()->IsMap()); | 2458 ASSERT(element_object->map()->IsMap()); |
| 3028 if (heap()->InNewSpace(element_object)) { | |
| 3029 Address array_addr = object->address(); | |
| 3030 Address element_addr = array_addr + FixedArray::kHeaderSize + | |
| 3031 j * kPointerSize; | |
| 3032 | |
| 3033 ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr)); | |
| 3034 } | |
| 3035 } | 2459 } |
| 3036 } | 2460 } |
| 3037 } | 2461 } |
| 3038 } | 2462 } |
| 3039 } | 2463 } |
| 3040 | 2464 |
| 3041 | 2465 |
| 3042 void LargeObjectSpace::Print() { | 2466 void LargeObjectSpace::Print() { |
| 3043 LargeObjectIterator it(this); | 2467 LargeObjectIterator it(this); |
| 3044 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) { | 2468 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { |
| 3045 obj->Print(); | 2469 obj->Print(); |
| 3046 } | 2470 } |
| 3047 } | 2471 } |
| 3048 | 2472 |
| 3049 | 2473 |
| 3050 void LargeObjectSpace::ReportStatistics() { | 2474 void LargeObjectSpace::ReportStatistics() { |
| 3051 PrintF(" size: %" V8_PTR_PREFIX "d\n", size_); | 2475 PrintF(" size: %" V8_PTR_PREFIX "d\n", size_); |
| 3052 int num_objects = 0; | 2476 int num_objects = 0; |
| 3053 ClearHistograms(); | 2477 ClearHistograms(); |
| 3054 LargeObjectIterator it(this); | 2478 LargeObjectIterator it(this); |
| 3055 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) { | 2479 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { |
| 3056 num_objects++; | 2480 num_objects++; |
| 3057 CollectHistogramInfo(obj); | 2481 CollectHistogramInfo(obj); |
| 3058 } | 2482 } |
| 3059 | 2483 |
| 3060 PrintF(" number of objects %d, " | 2484 PrintF(" number of objects %d, " |
| 3061 "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_); | 2485 "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_); |
| 3062 if (num_objects > 0) ReportHistogram(false); | 2486 if (num_objects > 0) ReportHistogram(false); |
| 3063 } | 2487 } |
| 3064 | 2488 |
| 3065 | 2489 |
| 3066 void LargeObjectSpace::CollectCodeStatistics() { | 2490 void LargeObjectSpace::CollectCodeStatistics() { |
| 3067 Isolate* isolate = heap()->isolate(); | 2491 Isolate* isolate = heap()->isolate(); |
| 3068 LargeObjectIterator obj_it(this); | 2492 LargeObjectIterator obj_it(this); |
| 3069 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { | 2493 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) { |
| 3070 if (obj->IsCode()) { | 2494 if (obj->IsCode()) { |
| 3071 Code* code = Code::cast(obj); | 2495 Code* code = Code::cast(obj); |
| 3072 isolate->code_kind_statistics()[code->kind()] += code->Size(); | 2496 isolate->code_kind_statistics()[code->kind()] += code->Size(); |
| 3073 } | 2497 } |
| 3074 } | 2498 } |
| 3075 } | 2499 } |
| 2500 |
| 2501 |
| 2502 void Page::Print() { |
| 2503 // Make a best-effort to print the objects in the page. |
| 2504 PrintF("Page@%p in %s\n", |
| 2505 this->address(), |
| 2506 AllocationSpaceName(this->owner()->identity())); |
| 2507 PrintF("  --------------------------------------\n"); |
| 2508 HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction()); |
| 2509 unsigned mark_size = 0; |
| 2510 for (HeapObject* object = objects.Next(); |
| 2511 object != NULL; |
| 2512 object = objects.Next()) { |
| 2513 bool is_marked = Marking::MarkBitFrom(object).Get(); |
| 2514 PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little. |
| 2515 if (is_marked) { |
| 2516 mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object); |
| 2517 } |
| 2518 object->ShortPrint(); |
| 2519 PrintF("\n"); |
| 2520 } |
| 2521 PrintF("  --------------------------------------\n"); |
| 2522 PrintF("  Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
| 2523 } |
| 2524 |
| 3076 #endif // DEBUG | 2525 #endif // DEBUG |
| 3077 | 2526 |
| 3078 } } // namespace v8::internal | 2527 } } // namespace v8::internal |