OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/v8.h" | 5 #include "src/v8.h" |
6 | 6 |
7 #include "src/base/platform/platform.h" | 7 #include "src/base/platform/platform.h" |
8 #include "src/full-codegen.h" | 8 #include "src/full-codegen.h" |
9 #include "src/macro-assembler.h" | 9 #include "src/macro-assembler.h" |
10 #include "src/mark-compact.h" | 10 #include "src/mark-compact.h" |
(...skipping 29 matching lines...) |
40 NULL, | 40 NULL, |
41 NULL, | 41 NULL, |
42 kAllPagesInSpace, | 42 kAllPagesInSpace, |
43 size_func); | 43 size_func); |
44 } | 44 } |
45 | 45 |
46 | 46 |
47 HeapObjectIterator::HeapObjectIterator(Page* page, | 47 HeapObjectIterator::HeapObjectIterator(Page* page, |
48 HeapObjectCallback size_func) { | 48 HeapObjectCallback size_func) { |
49 Space* owner = page->owner(); | 49 Space* owner = page->owner(); |
50 ASSERT(owner == page->heap()->old_pointer_space() || | 50 DCHECK(owner == page->heap()->old_pointer_space() || |
51 owner == page->heap()->old_data_space() || | 51 owner == page->heap()->old_data_space() || |
52 owner == page->heap()->map_space() || | 52 owner == page->heap()->map_space() || |
53 owner == page->heap()->cell_space() || | 53 owner == page->heap()->cell_space() || |
54 owner == page->heap()->property_cell_space() || | 54 owner == page->heap()->property_cell_space() || |
55 owner == page->heap()->code_space()); | 55 owner == page->heap()->code_space()); |
56 Initialize(reinterpret_cast<PagedSpace*>(owner), | 56 Initialize(reinterpret_cast<PagedSpace*>(owner), |
57 page->area_start(), | 57 page->area_start(), |
58 page->area_end(), | 58 page->area_end(), |
59 kOnePageOnly, | 59 kOnePageOnly, |
60 size_func); | 60 size_func); |
61 ASSERT(page->WasSweptPrecisely() || page->SweepingCompleted()); | 61 DCHECK(page->WasSweptPrecisely() || page->SweepingCompleted()); |
62 } | 62 } |
63 | 63 |
64 | 64 |
65 void HeapObjectIterator::Initialize(PagedSpace* space, | 65 void HeapObjectIterator::Initialize(PagedSpace* space, |
66 Address cur, Address end, | 66 Address cur, Address end, |
67 HeapObjectIterator::PageMode mode, | 67 HeapObjectIterator::PageMode mode, |
68 HeapObjectCallback size_f) { | 68 HeapObjectCallback size_f) { |
69 // Check that we actually can iterate this space. | 69 // Check that we actually can iterate this space. |
70 ASSERT(space->swept_precisely()); | 70 DCHECK(space->swept_precisely()); |
71 | 71 |
72 space_ = space; | 72 space_ = space; |
73 cur_addr_ = cur; | 73 cur_addr_ = cur; |
74 cur_end_ = end; | 74 cur_end_ = end; |
75 page_mode_ = mode; | 75 page_mode_ = mode; |
76 size_func_ = size_f; | 76 size_func_ = size_f; |
77 } | 77 } |
78 | 78 |
79 | 79 |
80 // We have hit the end of the page and should advance to the next block of | 80 // We have hit the end of the page and should advance to the next block of |
81 // objects. | 81 // objects. |
82 bool HeapObjectIterator::AdvanceToNextPage() { | 82 bool HeapObjectIterator::AdvanceToNextPage() { |
83 ASSERT(cur_addr_ == cur_end_); | 83 DCHECK(cur_addr_ == cur_end_); |
84 if (page_mode_ == kOnePageOnly) return false; | 84 if (page_mode_ == kOnePageOnly) return false; |
85 Page* cur_page; | 85 Page* cur_page; |
86 if (cur_addr_ == NULL) { | 86 if (cur_addr_ == NULL) { |
87 cur_page = space_->anchor(); | 87 cur_page = space_->anchor(); |
88 } else { | 88 } else { |
89 cur_page = Page::FromAddress(cur_addr_ - 1); | 89 cur_page = Page::FromAddress(cur_addr_ - 1); |
90 ASSERT(cur_addr_ == cur_page->area_end()); | 90 DCHECK(cur_addr_ == cur_page->area_end()); |
91 } | 91 } |
92 cur_page = cur_page->next_page(); | 92 cur_page = cur_page->next_page(); |
93 if (cur_page == space_->anchor()) return false; | 93 if (cur_page == space_->anchor()) return false; |
94 cur_addr_ = cur_page->area_start(); | 94 cur_addr_ = cur_page->area_start(); |
95 cur_end_ = cur_page->area_end(); | 95 cur_end_ = cur_page->area_end(); |
96 ASSERT(cur_page->WasSweptPrecisely()); | 96 DCHECK(cur_page->WasSweptPrecisely()); |
97 return true; | 97 return true; |
98 } | 98 } |
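
Note: the anchor page here is a sentinel — pages of a space form a circular doubly-linked list, and iteration stops when the walk wraps back to the anchor. A minimal standalone sketch of that pattern (illustrative types, not V8's API):

    #include <cstdio>

    // Stand-in for a page ring with an anchor sentinel, mirroring the
    // anchor()/next_page() shape used by AdvanceToNextPage() above.
    struct PageNode {
      PageNode* next;
      int id;  // 0 marks the anchor
    };

    // Visits every real page exactly once; the loop ends when it wraps
    // around to the anchor, just as AdvanceToNextPage() returns false.
    void VisitAll(PageNode* anchor) {
      for (PageNode* p = anchor->next; p != anchor; p = p->next) {
        std::printf("visiting page %d\n", p->id);
      }
    }

    int main() {
      PageNode anchor{nullptr, 0}, a{nullptr, 1}, b{nullptr, 2};
      anchor.next = &a; a.next = &b; b.next = &anchor;  // close the ring
      VisitAll(&anchor);  // prints pages 1 and 2, then stops
    }
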
99 | 99 |
100 | 100 |
101 // ----------------------------------------------------------------------------- | 101 // ----------------------------------------------------------------------------- |
102 // CodeRange | 102 // CodeRange |
103 | 103 |
104 | 104 |
105 CodeRange::CodeRange(Isolate* isolate) | 105 CodeRange::CodeRange(Isolate* isolate) |
106 : isolate_(isolate), | 106 : isolate_(isolate), |
107 code_range_(NULL), | 107 code_range_(NULL), |
108 free_list_(0), | 108 free_list_(0), |
109 allocation_list_(0), | 109 allocation_list_(0), |
110 current_allocation_block_index_(0) { | 110 current_allocation_block_index_(0) { |
111 } | 111 } |
112 | 112 |
113 | 113 |
114 bool CodeRange::SetUp(size_t requested) { | 114 bool CodeRange::SetUp(size_t requested) { |
115 ASSERT(code_range_ == NULL); | 115 DCHECK(code_range_ == NULL); |
116 | 116 |
117 if (requested == 0) { | 117 if (requested == 0) { |
118 // When a target requires the code range feature, we put all code objects | 118 // When a target requires the code range feature, we put all code objects |
119 // in a kMaximalCodeRangeSize range of virtual address space, so that | 119 // in a kMaximalCodeRangeSize range of virtual address space, so that |
120 // they can call each other with near calls. | 120 // they can call each other with near calls. |
121 if (kRequiresCodeRange) { | 121 if (kRequiresCodeRange) { |
122 requested = kMaximalCodeRangeSize; | 122 requested = kMaximalCodeRangeSize; |
123 } else { | 123 } else { |
124 return true; | 124 return true; |
125 } | 125 } |
126 } | 126 } |
127 | 127 |
128 ASSERT(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize); | 128 DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize); |
129 code_range_ = new base::VirtualMemory(requested); | 129 code_range_ = new base::VirtualMemory(requested); |
130 CHECK(code_range_ != NULL); | 130 CHECK(code_range_ != NULL); |
131 if (!code_range_->IsReserved()) { | 131 if (!code_range_->IsReserved()) { |
132 delete code_range_; | 132 delete code_range_; |
133 code_range_ = NULL; | 133 code_range_ = NULL; |
134 return false; | 134 return false; |
135 } | 135 } |
136 | 136 |
137 // We are sure that we have mapped a block of requested addresses. | 137 // We are sure that we have mapped a block of requested addresses. |
138 ASSERT(code_range_->size() == requested); | 138 DCHECK(code_range_->size() == requested); |
139 LOG(isolate_, | 139 LOG(isolate_, |
140 NewEvent("CodeRange", code_range_->address(), requested)); | 140 NewEvent("CodeRange", code_range_->address(), requested)); |
141 Address base = reinterpret_cast<Address>(code_range_->address()); | 141 Address base = reinterpret_cast<Address>(code_range_->address()); |
142 Address aligned_base = | 142 Address aligned_base = |
143 RoundUp(reinterpret_cast<Address>(code_range_->address()), | 143 RoundUp(reinterpret_cast<Address>(code_range_->address()), |
144 MemoryChunk::kAlignment); | 144 MemoryChunk::kAlignment); |
145 size_t size = code_range_->size() - (aligned_base - base); | 145 size_t size = code_range_->size() - (aligned_base - base); |
146 allocation_list_.Add(FreeBlock(aligned_base, size)); | 146 allocation_list_.Add(FreeBlock(aligned_base, size)); |
147 current_allocation_block_index_ = 0; | 147 current_allocation_block_index_ = 0; |
148 return true; | 148 return true; |
(...skipping 46 matching lines...) |
195 } | 195 } |
196 current_allocation_block_index_ = 0; | 196 current_allocation_block_index_ = 0; |
197 // Code range is full or too fragmented. | 197 // Code range is full or too fragmented. |
198 return false; | 198 return false; |
199 } | 199 } |
200 | 200 |
201 | 201 |
202 Address CodeRange::AllocateRawMemory(const size_t requested_size, | 202 Address CodeRange::AllocateRawMemory(const size_t requested_size, |
203 const size_t commit_size, | 203 const size_t commit_size, |
204 size_t* allocated) { | 204 size_t* allocated) { |
205 ASSERT(commit_size <= requested_size); | 205 DCHECK(commit_size <= requested_size); |
206 ASSERT(current_allocation_block_index_ < allocation_list_.length()); | 206 DCHECK(current_allocation_block_index_ < allocation_list_.length()); |
207 if (requested_size > allocation_list_[current_allocation_block_index_].size) { | 207 if (requested_size > allocation_list_[current_allocation_block_index_].size) { |
208 // Find an allocation block large enough. | 208 // Find an allocation block large enough. |
209 if (!GetNextAllocationBlock(requested_size)) return NULL; | 209 if (!GetNextAllocationBlock(requested_size)) return NULL; |
210 } | 210 } |
211 // Commit the requested memory at the start of the current allocation block. | 211 // Commit the requested memory at the start of the current allocation block. |
212 size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment); | 212 size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment); |
213 FreeBlock current = allocation_list_[current_allocation_block_index_]; | 213 FreeBlock current = allocation_list_[current_allocation_block_index_]; |
214 if (aligned_requested >= (current.size - Page::kPageSize)) { | 214 if (aligned_requested >= (current.size - Page::kPageSize)) { |
215 // Don't leave a small free block; it is useless for a large object or chunk. | 215 // Don't leave a small free block; it is useless for a large object or chunk. |
216 *allocated = current.size; | 216 *allocated = current.size; |
217 } else { | 217 } else { |
218 *allocated = aligned_requested; | 218 *allocated = aligned_requested; |
219 } | 219 } |
220 ASSERT(*allocated <= current.size); | 220 DCHECK(*allocated <= current.size); |
221 ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment)); | 221 DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment)); |
222 if (!isolate_->memory_allocator()->CommitExecutableMemory(code_range_, | 222 if (!isolate_->memory_allocator()->CommitExecutableMemory(code_range_, |
223 current.start, | 223 current.start, |
224 commit_size, | 224 commit_size, |
225 *allocated)) { | 225 *allocated)) { |
226 *allocated = 0; | 226 *allocated = 0; |
227 return NULL; | 227 return NULL; |
228 } | 228 } |
229 allocation_list_[current_allocation_block_index_].start += *allocated; | 229 allocation_list_[current_allocation_block_index_].start += *allocated; |
230 allocation_list_[current_allocation_block_index_].size -= *allocated; | 230 allocation_list_[current_allocation_block_index_].size -= *allocated; |
231 if (*allocated == current.size) { | 231 if (*allocated == current.size) { |
232 // This block is used up, get the next one. | 232 // This block is used up, get the next one. |
233 if (!GetNextAllocationBlock(0)) return NULL; | 233 if (!GetNextAllocationBlock(0)) return NULL; |
234 } | 234 } |
235 return current.start; | 235 return current.start; |
236 } | 236 } |
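
Note: AllocateRawMemory rounds the request up to chunk alignment and, when the leftover tail of the current free block would be smaller than a page, hands out the whole block rather than leaving an unusable fragment. A standalone sketch of just that size computation (constants are assumptions, not V8's actual values):

    #include <cstddef>

    constexpr size_t kAlignment = 1u << 20;  // assumed MemoryChunk::kAlignment
    constexpr size_t kPageSize = 1u << 20;   // assumed Page::kPageSize

    size_t RoundUpTo(size_t x, size_t m) { return (x + m - 1) & ~(m - 1); }

    // How many bytes to carve from a free block of block_size bytes
    // (block_size is at least kPageSize in this context).
    size_t CarveFromBlock(size_t requested, size_t block_size) {
      size_t aligned = RoundUpTo(requested, kAlignment);
      if (aligned >= block_size - kPageSize) return block_size;  // take it all
      return aligned;
    }
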
237 | 237 |
238 | 238 |
239 bool CodeRange::CommitRawMemory(Address start, size_t length) { | 239 bool CodeRange::CommitRawMemory(Address start, size_t length) { |
240 return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE); | 240 return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE); |
241 } | 241 } |
242 | 242 |
243 | 243 |
244 bool CodeRange::UncommitRawMemory(Address start, size_t length) { | 244 bool CodeRange::UncommitRawMemory(Address start, size_t length) { |
245 return code_range_->Uncommit(start, length); | 245 return code_range_->Uncommit(start, length); |
246 } | 246 } |
247 | 247 |
248 | 248 |
249 void CodeRange::FreeRawMemory(Address address, size_t length) { | 249 void CodeRange::FreeRawMemory(Address address, size_t length) { |
250 ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment)); | 250 DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment)); |
251 free_list_.Add(FreeBlock(address, length)); | 251 free_list_.Add(FreeBlock(address, length)); |
252 code_range_->Uncommit(address, length); | 252 code_range_->Uncommit(address, length); |
253 } | 253 } |
254 | 254 |
255 | 255 |
256 void CodeRange::TearDown() { | 256 void CodeRange::TearDown() { |
257 delete code_range_; // Frees all memory in the virtual memory range. | 257 delete code_range_; // Frees all memory in the virtual memory range. |
258 code_range_ = NULL; | 258 code_range_ = NULL; |
259 free_list_.Free(); | 259 free_list_.Free(); |
260 allocation_list_.Free(); | 260 allocation_list_.Free(); |
(...skipping 11 matching lines...) |
272 size_(0), | 272 size_(0), |
273 size_executable_(0), | 273 size_executable_(0), |
274 lowest_ever_allocated_(reinterpret_cast<void*>(-1)), | 274 lowest_ever_allocated_(reinterpret_cast<void*>(-1)), |
275 highest_ever_allocated_(reinterpret_cast<void*>(0)) { | 275 highest_ever_allocated_(reinterpret_cast<void*>(0)) { |
276 } | 276 } |
277 | 277 |
278 | 278 |
279 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) { | 279 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) { |
280 capacity_ = RoundUp(capacity, Page::kPageSize); | 280 capacity_ = RoundUp(capacity, Page::kPageSize); |
281 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize); | 281 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize); |
282 ASSERT_GE(capacity_, capacity_executable_); | 282 DCHECK_GE(capacity_, capacity_executable_); |
283 | 283 |
284 size_ = 0; | 284 size_ = 0; |
285 size_executable_ = 0; | 285 size_executable_ = 0; |
286 | 286 |
287 return true; | 287 return true; |
288 } | 288 } |
289 | 289 |
290 | 290 |
291 void MemoryAllocator::TearDown() { | 291 void MemoryAllocator::TearDown() { |
292 // Check that spaces were torn down before MemoryAllocator. | 292 // Check that spaces were torn down before MemoryAllocator. |
293 ASSERT(size_ == 0); | 293 DCHECK(size_ == 0); |
294 // TODO(gc) this will be true again when we fix FreeMemory. | 294 // TODO(gc) this will be true again when we fix FreeMemory. |
295 // ASSERT(size_executable_ == 0); | 295 // DCHECK(size_executable_ == 0); |
296 capacity_ = 0; | 296 capacity_ = 0; |
297 capacity_executable_ = 0; | 297 capacity_executable_ = 0; |
298 } | 298 } |
299 | 299 |
300 | 300 |
301 bool MemoryAllocator::CommitMemory(Address base, | 301 bool MemoryAllocator::CommitMemory(Address base, |
302 size_t size, | 302 size_t size, |
303 Executability executable) { | 303 Executability executable) { |
304 if (!base::VirtualMemory::CommitRegion(base, size, | 304 if (!base::VirtualMemory::CommitRegion(base, size, |
305 executable == EXECUTABLE)) { | 305 executable == EXECUTABLE)) { |
306 return false; | 306 return false; |
307 } | 307 } |
308 UpdateAllocatedSpaceLimits(base, base + size); | 308 UpdateAllocatedSpaceLimits(base, base + size); |
309 return true; | 309 return true; |
310 } | 310 } |
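
Note: CommitMemory pairs the actual commit with UpdateAllocatedSpaceLimits, which maintains the lowest/highest-ever-allocated watermarks initialized to reinterpret_cast<void*>(-1) and 0 in the constructor above. A sketch of that watermark idea (names illustrative, not V8's):

    #include <cstdint>

    struct Watermarks {
      uintptr_t lowest = UINTPTR_MAX;  // matches reinterpret_cast<void*>(-1)
      uintptr_t highest = 0;
      void Update(uintptr_t base, uintptr_t limit) {
        if (base < lowest) lowest = base;
        if (limit > highest) highest = limit;
      }
      // Conservative membership test: may include gaps between commits,
      // but never misses memory this allocator handed out.
      bool MayContain(uintptr_t a) const { return a >= lowest && a < highest; }
    };
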
311 | 311 |
312 | 312 |
313 void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation, | 313 void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation, |
314 Executability executable) { | 314 Executability executable) { |
315 // TODO(gc) make code_range part of memory allocator? | 315 // TODO(gc) make code_range part of memory allocator? |
316 ASSERT(reservation->IsReserved()); | 316 DCHECK(reservation->IsReserved()); |
317 size_t size = reservation->size(); | 317 size_t size = reservation->size(); |
318 ASSERT(size_ >= size); | 318 DCHECK(size_ >= size); |
319 size_ -= size; | 319 size_ -= size; |
320 | 320 |
321 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); | 321 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); |
322 | 322 |
323 if (executable == EXECUTABLE) { | 323 if (executable == EXECUTABLE) { |
324 ASSERT(size_executable_ >= size); | 324 DCHECK(size_executable_ >= size); |
325 size_executable_ -= size; | 325 size_executable_ -= size; |
326 } | 326 } |
327 // Code which is part of the code-range does not have its own VirtualMemory. | 327 // Code which is part of the code-range does not have its own VirtualMemory. |
328 ASSERT(isolate_->code_range() == NULL || | 328 DCHECK(isolate_->code_range() == NULL || |
329 !isolate_->code_range()->contains( | 329 !isolate_->code_range()->contains( |
330 static_cast<Address>(reservation->address()))); | 330 static_cast<Address>(reservation->address()))); |
331 ASSERT(executable == NOT_EXECUTABLE || | 331 DCHECK(executable == NOT_EXECUTABLE || |
332 isolate_->code_range() == NULL || | 332 isolate_->code_range() == NULL || |
333 !isolate_->code_range()->valid()); | 333 !isolate_->code_range()->valid()); |
334 reservation->Release(); | 334 reservation->Release(); |
335 } | 335 } |
336 | 336 |
337 | 337 |
338 void MemoryAllocator::FreeMemory(Address base, | 338 void MemoryAllocator::FreeMemory(Address base, |
339 size_t size, | 339 size_t size, |
340 Executability executable) { | 340 Executability executable) { |
341 // TODO(gc) make code_range part of memory allocator? | 341 // TODO(gc) make code_range part of memory allocator? |
342 ASSERT(size_ >= size); | 342 DCHECK(size_ >= size); |
343 size_ -= size; | 343 size_ -= size; |
344 | 344 |
345 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); | 345 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); |
346 | 346 |
347 if (executable == EXECUTABLE) { | 347 if (executable == EXECUTABLE) { |
348 ASSERT(size_executable_ >= size); | 348 DCHECK(size_executable_ >= size); |
349 size_executable_ -= size; | 349 size_executable_ -= size; |
350 } | 350 } |
351 if (isolate_->code_range() != NULL && | 351 if (isolate_->code_range() != NULL && |
352 isolate_->code_range()->contains(static_cast<Address>(base))) { | 352 isolate_->code_range()->contains(static_cast<Address>(base))) { |
353 ASSERT(executable == EXECUTABLE); | 353 DCHECK(executable == EXECUTABLE); |
354 isolate_->code_range()->FreeRawMemory(base, size); | 354 isolate_->code_range()->FreeRawMemory(base, size); |
355 } else { | 355 } else { |
356 ASSERT(executable == NOT_EXECUTABLE || | 356 DCHECK(executable == NOT_EXECUTABLE || |
357 isolate_->code_range() == NULL || | 357 isolate_->code_range() == NULL || |
358 !isolate_->code_range()->valid()); | 358 !isolate_->code_range()->valid()); |
359 bool result = base::VirtualMemory::ReleaseRegion(base, size); | 359 bool result = base::VirtualMemory::ReleaseRegion(base, size); |
360 USE(result); | 360 USE(result); |
361 ASSERT(result); | 361 DCHECK(result); |
362 } | 362 } |
363 } | 363 } |
364 | 364 |
365 | 365 |
366 Address MemoryAllocator::ReserveAlignedMemory(size_t size, | 366 Address MemoryAllocator::ReserveAlignedMemory(size_t size, |
367 size_t alignment, | 367 size_t alignment, |
368 base::VirtualMemory* controller) { | 368 base::VirtualMemory* controller) { |
369 base::VirtualMemory reservation(size, alignment); | 369 base::VirtualMemory reservation(size, alignment); |
370 | 370 |
371 if (!reservation.IsReserved()) return NULL; | 371 if (!reservation.IsReserved()) return NULL; |
372 size_ += reservation.size(); | 372 size_ += reservation.size(); |
373 Address base = RoundUp(static_cast<Address>(reservation.address()), | 373 Address base = RoundUp(static_cast<Address>(reservation.address()), |
374 alignment); | 374 alignment); |
375 controller->TakeControl(&reservation); | 375 controller->TakeControl(&reservation); |
376 return base; | 376 return base; |
377 } | 377 } |
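
Note: ReserveAlignedMemory leans on base::VirtualMemory(size, alignment) to over-reserve, then rounds the reservation address up to the requested alignment; the slack before the aligned base is simply wasted. The arithmetic, standalone (addresses made up for illustration):

    #include <cassert>
    #include <cstdint>

    uintptr_t RoundUpAddr(uintptr_t a, uintptr_t alignment) {
      return (a + alignment - 1) & ~(alignment - 1);  // alignment: power of two
    }

    int main() {
      uintptr_t reserved_at = 0x10035000;  // what the OS happened to return
      uintptr_t base = RoundUpAddr(reserved_at, 1u << 20);
      assert(base == 0x10100000);          // first 1 MB-aligned address
    }
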
378 | 378 |
379 | 379 |
380 Address MemoryAllocator::AllocateAlignedMemory( | 380 Address MemoryAllocator::AllocateAlignedMemory( |
381 size_t reserve_size, size_t commit_size, size_t alignment, | 381 size_t reserve_size, size_t commit_size, size_t alignment, |
382 Executability executable, base::VirtualMemory* controller) { | 382 Executability executable, base::VirtualMemory* controller) { |
383 ASSERT(commit_size <= reserve_size); | 383 DCHECK(commit_size <= reserve_size); |
384 base::VirtualMemory reservation; | 384 base::VirtualMemory reservation; |
385 Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation); | 385 Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation); |
386 if (base == NULL) return NULL; | 386 if (base == NULL) return NULL; |
387 | 387 |
388 if (executable == EXECUTABLE) { | 388 if (executable == EXECUTABLE) { |
389 if (!CommitExecutableMemory(&reservation, | 389 if (!CommitExecutableMemory(&reservation, |
390 base, | 390 base, |
391 commit_size, | 391 commit_size, |
392 reserve_size)) { | 392 reserve_size)) { |
393 base = NULL; | 393 base = NULL; |
(...skipping 37 matching lines...) |
431 area_start, | 431 area_start, |
432 area_end, | 432 area_end, |
433 NOT_EXECUTABLE, | 433 NOT_EXECUTABLE, |
434 semi_space); | 434 semi_space); |
435 chunk->set_next_chunk(NULL); | 435 chunk->set_next_chunk(NULL); |
436 chunk->set_prev_chunk(NULL); | 436 chunk->set_prev_chunk(NULL); |
437 chunk->initialize_scan_on_scavenge(true); | 437 chunk->initialize_scan_on_scavenge(true); |
438 bool in_to_space = (semi_space->id() != kFromSpace); | 438 bool in_to_space = (semi_space->id() != kFromSpace); |
439 chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE | 439 chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE |
440 : MemoryChunk::IN_FROM_SPACE); | 440 : MemoryChunk::IN_FROM_SPACE); |
441 ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE | 441 DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE |
442 : MemoryChunk::IN_TO_SPACE)); | 442 : MemoryChunk::IN_TO_SPACE)); |
443 NewSpacePage* page = static_cast<NewSpacePage*>(chunk); | 443 NewSpacePage* page = static_cast<NewSpacePage*>(chunk); |
444 heap->incremental_marking()->SetNewSpacePageFlags(page); | 444 heap->incremental_marking()->SetNewSpacePageFlags(page); |
445 return page; | 445 return page; |
446 } | 446 } |
447 | 447 |
448 | 448 |
449 void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) { | 449 void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) { |
450 set_owner(semi_space); | 450 set_owner(semi_space); |
451 set_next_chunk(this); | 451 set_next_chunk(this); |
452 set_prev_chunk(this); | 452 set_prev_chunk(this); |
453 // The flags mark this invalid page as not being in new-space. | 453 // The flags mark this invalid page as not being in new-space. |
454 // All real new-space pages will be in new-space. | 454 // All real new-space pages will be in new-space. |
455 SetFlags(0, ~0); | 455 SetFlags(0, ~0); |
456 } | 456 } |
457 | 457 |
458 | 458 |
459 MemoryChunk* MemoryChunk::Initialize(Heap* heap, | 459 MemoryChunk* MemoryChunk::Initialize(Heap* heap, |
460 Address base, | 460 Address base, |
461 size_t size, | 461 size_t size, |
462 Address area_start, | 462 Address area_start, |
463 Address area_end, | 463 Address area_end, |
464 Executability executable, | 464 Executability executable, |
465 Space* owner) { | 465 Space* owner) { |
466 MemoryChunk* chunk = FromAddress(base); | 466 MemoryChunk* chunk = FromAddress(base); |
467 | 467 |
468 ASSERT(base == chunk->address()); | 468 DCHECK(base == chunk->address()); |
469 | 469 |
470 chunk->heap_ = heap; | 470 chunk->heap_ = heap; |
471 chunk->size_ = size; | 471 chunk->size_ = size; |
472 chunk->area_start_ = area_start; | 472 chunk->area_start_ = area_start; |
473 chunk->area_end_ = area_end; | 473 chunk->area_end_ = area_end; |
474 chunk->flags_ = 0; | 474 chunk->flags_ = 0; |
475 chunk->set_owner(owner); | 475 chunk->set_owner(owner); |
476 chunk->InitializeReservedMemory(); | 476 chunk->InitializeReservedMemory(); |
477 chunk->slots_buffer_ = NULL; | 477 chunk->slots_buffer_ = NULL; |
478 chunk->skip_list_ = NULL; | 478 chunk->skip_list_ = NULL; |
479 chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity; | 479 chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity; |
480 chunk->progress_bar_ = 0; | 480 chunk->progress_bar_ = 0; |
481 chunk->high_water_mark_ = static_cast<int>(area_start - base); | 481 chunk->high_water_mark_ = static_cast<int>(area_start - base); |
482 chunk->set_parallel_sweeping(SWEEPING_DONE); | 482 chunk->set_parallel_sweeping(SWEEPING_DONE); |
483 chunk->available_in_small_free_list_ = 0; | 483 chunk->available_in_small_free_list_ = 0; |
484 chunk->available_in_medium_free_list_ = 0; | 484 chunk->available_in_medium_free_list_ = 0; |
485 chunk->available_in_large_free_list_ = 0; | 485 chunk->available_in_large_free_list_ = 0; |
486 chunk->available_in_huge_free_list_ = 0; | 486 chunk->available_in_huge_free_list_ = 0; |
487 chunk->non_available_small_blocks_ = 0; | 487 chunk->non_available_small_blocks_ = 0; |
488 chunk->ResetLiveBytes(); | 488 chunk->ResetLiveBytes(); |
489 Bitmap::Clear(chunk); | 489 Bitmap::Clear(chunk); |
490 chunk->initialize_scan_on_scavenge(false); | 490 chunk->initialize_scan_on_scavenge(false); |
491 chunk->SetFlag(WAS_SWEPT_PRECISELY); | 491 chunk->SetFlag(WAS_SWEPT_PRECISELY); |
492 | 492 |
493 ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset); | 493 DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset); |
494 ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset); | 494 DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset); |
495 | 495 |
496 if (executable == EXECUTABLE) { | 496 if (executable == EXECUTABLE) { |
497 chunk->SetFlag(IS_EXECUTABLE); | 497 chunk->SetFlag(IS_EXECUTABLE); |
498 } | 498 } |
499 | 499 |
500 if (owner == heap->old_data_space()) { | 500 if (owner == heap->old_data_space()) { |
501 chunk->SetFlag(CONTAINS_ONLY_DATA); | 501 chunk->SetFlag(CONTAINS_ONLY_DATA); |
502 } | 502 } |
503 | 503 |
504 return chunk; | 504 return chunk; |
505 } | 505 } |
506 | 506 |
507 | 507 |
508 // Commit MemoryChunk area to the requested size. | 508 // Commit MemoryChunk area to the requested size. |
509 bool MemoryChunk::CommitArea(size_t requested) { | 509 bool MemoryChunk::CommitArea(size_t requested) { |
510 size_t guard_size = IsFlagSet(IS_EXECUTABLE) ? | 510 size_t guard_size = IsFlagSet(IS_EXECUTABLE) ? |
511 MemoryAllocator::CodePageGuardSize() : 0; | 511 MemoryAllocator::CodePageGuardSize() : 0; |
512 size_t header_size = area_start() - address() - guard_size; | 512 size_t header_size = area_start() - address() - guard_size; |
513 size_t commit_size = | 513 size_t commit_size = |
514 RoundUp(header_size + requested, base::OS::CommitPageSize()); | 514 RoundUp(header_size + requested, base::OS::CommitPageSize()); |
515 size_t committed_size = RoundUp(header_size + (area_end() - area_start()), | 515 size_t committed_size = RoundUp(header_size + (area_end() - area_start()), |
516 base::OS::CommitPageSize()); | 516 base::OS::CommitPageSize()); |
517 | 517 |
518 if (commit_size > committed_size) { | 518 if (commit_size > committed_size) { |
519 // Commit size should be less than or equal to the reserved size. | 519 // Commit size should be less than or equal to the reserved size. |
520 ASSERT(commit_size <= size() - 2 * guard_size); | 520 DCHECK(commit_size <= size() - 2 * guard_size); |
521 // Append the committed area. | 521 // Append the committed area. |
522 Address start = address() + committed_size + guard_size; | 522 Address start = address() + committed_size + guard_size; |
523 size_t length = commit_size - committed_size; | 523 size_t length = commit_size - committed_size; |
524 if (reservation_.IsReserved()) { | 524 if (reservation_.IsReserved()) { |
525 Executability executable = IsFlagSet(IS_EXECUTABLE) | 525 Executability executable = IsFlagSet(IS_EXECUTABLE) |
526 ? EXECUTABLE : NOT_EXECUTABLE; | 526 ? EXECUTABLE : NOT_EXECUTABLE; |
527 if (!heap()->isolate()->memory_allocator()->CommitMemory( | 527 if (!heap()->isolate()->memory_allocator()->CommitMemory( |
528 start, length, executable)) { | 528 start, length, executable)) { |
529 return false; | 529 return false; |
530 } | 530 } |
531 } else { | 531 } else { |
532 CodeRange* code_range = heap_->isolate()->code_range(); | 532 CodeRange* code_range = heap_->isolate()->code_range(); |
533 ASSERT(code_range != NULL && code_range->valid() && | 533 DCHECK(code_range != NULL && code_range->valid() && |
534 IsFlagSet(IS_EXECUTABLE)); | 534 IsFlagSet(IS_EXECUTABLE)); |
535 if (!code_range->CommitRawMemory(start, length)) return false; | 535 if (!code_range->CommitRawMemory(start, length)) return false; |
536 } | 536 } |
537 | 537 |
538 if (Heap::ShouldZapGarbage()) { | 538 if (Heap::ShouldZapGarbage()) { |
539 heap_->isolate()->memory_allocator()->ZapBlock(start, length); | 539 heap_->isolate()->memory_allocator()->ZapBlock(start, length); |
540 } | 540 } |
541 } else if (commit_size < committed_size) { | 541 } else if (commit_size < committed_size) { |
542 ASSERT(commit_size > 0); | 542 DCHECK(commit_size > 0); |
543 // Shrink the committed area. | 543 // Shrink the committed area. |
544 size_t length = committed_size - commit_size; | 544 size_t length = committed_size - commit_size; |
545 Address start = address() + committed_size + guard_size - length; | 545 Address start = address() + committed_size + guard_size - length; |
546 if (reservation_.IsReserved()) { | 546 if (reservation_.IsReserved()) { |
547 if (!reservation_.Uncommit(start, length)) return false; | 547 if (!reservation_.Uncommit(start, length)) return false; |
548 } else { | 548 } else { |
549 CodeRange* code_range = heap_->isolate()->code_range(); | 549 CodeRange* code_range = heap_->isolate()->code_range(); |
550 ASSERT(code_range != NULL && code_range->valid() && | 550 DCHECK(code_range != NULL && code_range->valid() && |
551 IsFlagSet(IS_EXECUTABLE)); | 551 IsFlagSet(IS_EXECUTABLE)); |
552 if (!code_range->UncommitRawMemory(start, length)) return false; | 552 if (!code_range->UncommitRawMemory(start, length)) return false; |
553 } | 553 } |
554 } | 554 } |
555 | 555 |
556 area_end_ = area_start_ + requested; | 556 area_end_ = area_start_ + requested; |
557 return true; | 557 return true; |
558 } | 558 } |
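
Note: CommitArea works in OS commit-page granularity — both the currently committed size and the requested size are measured from the chunk base (header included) and rounded up, and only the delta is committed or uncommitted. A worked example under assumed sizes (4 KB commit pages, 32 KB header, no guard pages):

    #include <cstddef>
    #include <cstdio>

    constexpr size_t kCommitPageSize = 4096;  // assumed OS commit page size

    size_t RoundUpSize(size_t x, size_t m) { return (x + m - 1) / m * m; }

    int main() {
      size_t header_size = 32 * 1024;
      size_t old_area = 100 * 1024;   // area currently committed
      size_t requested = 150 * 1024;  // new area size

      size_t committed = RoundUpSize(header_size + old_area, kCommitPageSize);
      size_t commit = RoundUpSize(header_size + requested, kCommitPageSize);
      if (commit > committed) {
        // Here: 188416 - 135168 = 53248 bytes (52 KB) newly committed.
        std::printf("grow by %zu bytes\n", commit - committed);
      } else if (commit < committed) {
        std::printf("shrink by %zu bytes\n", committed - commit);
      }
    }
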
559 | 559 |
560 | 560 |
(...skipping 14 matching lines...) |
575 prev_element->set_next_chunk(next_element); | 575 prev_element->set_next_chunk(next_element); |
576 set_prev_chunk(NULL); | 576 set_prev_chunk(NULL); |
577 set_next_chunk(NULL); | 577 set_next_chunk(NULL); |
578 } | 578 } |
579 | 579 |
580 | 580 |
581 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size, | 581 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size, |
582 intptr_t commit_area_size, | 582 intptr_t commit_area_size, |
583 Executability executable, | 583 Executability executable, |
584 Space* owner) { | 584 Space* owner) { |
585 ASSERT(commit_area_size <= reserve_area_size); | 585 DCHECK(commit_area_size <= reserve_area_size); |
586 | 586 |
587 size_t chunk_size; | 587 size_t chunk_size; |
588 Heap* heap = isolate_->heap(); | 588 Heap* heap = isolate_->heap(); |
589 Address base = NULL; | 589 Address base = NULL; |
590 base::VirtualMemory reservation; | 590 base::VirtualMemory reservation; |
591 Address area_start = NULL; | 591 Address area_start = NULL; |
592 Address area_end = NULL; | 592 Address area_end = NULL; |
593 | 593 |
594 // | 594 // |
595 // MemoryChunk layout: | 595 // MemoryChunk layout: |
(...skipping 39 matching lines...) |
635 | 635 |
636 // Size of header (not executable) plus area (executable). | 636 // Size of header (not executable) plus area (executable). |
637 size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size, | 637 size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size, |
638 base::OS::CommitPageSize()); | 638 base::OS::CommitPageSize()); |
639 // Allocate executable memory either from code range or from the | 639 // Allocate executable memory either from code range or from the |
640 // OS. | 640 // OS. |
641 if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) { | 641 if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) { |
642 base = isolate_->code_range()->AllocateRawMemory(chunk_size, | 642 base = isolate_->code_range()->AllocateRawMemory(chunk_size, |
643 commit_size, | 643 commit_size, |
644 &chunk_size); | 644 &chunk_size); |
645 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base), | 645 DCHECK(IsAligned(reinterpret_cast<intptr_t>(base), |
646 MemoryChunk::kAlignment)); | 646 MemoryChunk::kAlignment)); |
647 if (base == NULL) return NULL; | 647 if (base == NULL) return NULL; |
648 size_ += chunk_size; | 648 size_ += chunk_size; |
649 // Update executable memory size. | 649 // Update executable memory size. |
650 size_executable_ += chunk_size; | 650 size_executable_ += chunk_size; |
651 } else { | 651 } else { |
652 base = AllocateAlignedMemory(chunk_size, | 652 base = AllocateAlignedMemory(chunk_size, |
653 commit_size, | 653 commit_size, |
654 MemoryChunk::kAlignment, | 654 MemoryChunk::kAlignment, |
655 executable, | 655 executable, |
(...skipping 159 matching lines...) |
815 if (memory_allocation_callbacks_[i].callback == callback) return true; | 815 if (memory_allocation_callbacks_[i].callback == callback) return true; |
816 } | 816 } |
817 return false; | 817 return false; |
818 } | 818 } |
819 | 819 |
820 | 820 |
821 void MemoryAllocator::AddMemoryAllocationCallback( | 821 void MemoryAllocator::AddMemoryAllocationCallback( |
822 MemoryAllocationCallback callback, | 822 MemoryAllocationCallback callback, |
823 ObjectSpace space, | 823 ObjectSpace space, |
824 AllocationAction action) { | 824 AllocationAction action) { |
825 ASSERT(callback != NULL); | 825 DCHECK(callback != NULL); |
826 MemoryAllocationCallbackRegistration registration(callback, space, action); | 826 MemoryAllocationCallbackRegistration registration(callback, space, action); |
827 ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback)); | 827 DCHECK(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback)); |
828 return memory_allocation_callbacks_.Add(registration); | 828 return memory_allocation_callbacks_.Add(registration); |
829 } | 829 } |
830 | 830 |
831 | 831 |
832 void MemoryAllocator::RemoveMemoryAllocationCallback( | 832 void MemoryAllocator::RemoveMemoryAllocationCallback( |
833 MemoryAllocationCallback callback) { | 833 MemoryAllocationCallback callback) { |
834 ASSERT(callback != NULL); | 834 DCHECK(callback != NULL); |
835 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { | 835 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { |
836 if (memory_allocation_callbacks_[i].callback == callback) { | 836 if (memory_allocation_callbacks_[i].callback == callback) { |
837 memory_allocation_callbacks_.Remove(i); | 837 memory_allocation_callbacks_.Remove(i); |
838 return; | 838 return; |
839 } | 839 } |
840 } | 840 } |
841 UNREACHABLE(); | 841 UNREACHABLE(); |
842 } | 842 } |
843 | 843 |
844 | 844 |
(...skipping 137 matching lines...) |
982 PageIterator it(this); | 982 PageIterator it(this); |
983 while (it.has_next()) { | 983 while (it.has_next()) { |
984 size += it.next()->CommittedPhysicalMemory(); | 984 size += it.next()->CommittedPhysicalMemory(); |
985 } | 985 } |
986 return size; | 986 return size; |
987 } | 987 } |
988 | 988 |
989 | 989 |
990 Object* PagedSpace::FindObject(Address addr) { | 990 Object* PagedSpace::FindObject(Address addr) { |
991 // Note: this function can only be called on precisely swept spaces. | 991 // Note: this function can only be called on precisely swept spaces. |
992 ASSERT(!heap()->mark_compact_collector()->in_use()); | 992 DCHECK(!heap()->mark_compact_collector()->in_use()); |
993 | 993 |
994 if (!Contains(addr)) return Smi::FromInt(0); // Signaling not found. | 994 if (!Contains(addr)) return Smi::FromInt(0); // Signaling not found. |
995 | 995 |
996 Page* p = Page::FromAddress(addr); | 996 Page* p = Page::FromAddress(addr); |
997 HeapObjectIterator it(p, NULL); | 997 HeapObjectIterator it(p, NULL); |
998 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { | 998 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { |
999 Address cur = obj->address(); | 999 Address cur = obj->address(); |
1000 Address next = cur + obj->Size(); | 1000 Address next = cur + obj->Size(); |
1001 if ((cur <= addr) && (addr < next)) return obj; | 1001 if ((cur <= addr) && (addr < next)) return obj; |
1002 } | 1002 } |
1003 | 1003 |
1004 UNREACHABLE(); | 1004 UNREACHABLE(); |
1005 return Smi::FromInt(0); | 1005 return Smi::FromInt(0); |
1006 } | 1006 } |
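
Note: FindObject relies on the precise-sweep invariant — a swept page is a dense run of objects, so a linear scan using each object's size finds the one covering any interior address. The scan, reduced to its essentials (sizes stand in for HeapObject::Size()):

    #include <cstddef>

    // Returns the index of the object covering `offset` in a dense run of
    // objects whose sizes are given, or -1 if the offset is past the run.
    int FindCovering(const size_t* sizes, int count, size_t offset) {
      size_t cur = 0;
      for (int i = 0; i < count; i++) {
        size_t next = cur + sizes[i];
        if (cur <= offset && offset < next) return i;
        cur = next;
      }
      return -1;  // FindObject treats this case as UNREACHABLE()
    }
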
1007 | 1007 |
1008 | 1008 |
1009 bool PagedSpace::CanExpand() { | 1009 bool PagedSpace::CanExpand() { |
1010 ASSERT(max_capacity_ % AreaSize() == 0); | 1010 DCHECK(max_capacity_ % AreaSize() == 0); |
1011 | 1011 |
1012 if (Capacity() == max_capacity_) return false; | 1012 if (Capacity() == max_capacity_) return false; |
1013 | 1013 |
1014 ASSERT(Capacity() < max_capacity_); | 1014 DCHECK(Capacity() < max_capacity_); |
1015 | 1015 |
1016 // Are we going to exceed capacity for this space? | 1016 // Are we going to exceed capacity for this space? |
1017 if ((Capacity() + Page::kPageSize) > max_capacity_) return false; | 1017 if ((Capacity() + Page::kPageSize) > max_capacity_) return false; |
1018 | 1018 |
1019 return true; | 1019 return true; |
1020 } | 1020 } |
1021 | 1021 |
1022 | 1022 |
1023 bool PagedSpace::Expand() { | 1023 bool PagedSpace::Expand() { |
1024 if (!CanExpand()) return false; | 1024 if (!CanExpand()) return false; |
1025 | 1025 |
1026 intptr_t size = AreaSize(); | 1026 intptr_t size = AreaSize(); |
1027 | 1027 |
1028 if (anchor_.next_page() == &anchor_) { | 1028 if (anchor_.next_page() == &anchor_) { |
1029 size = SizeOfFirstPage(); | 1029 size = SizeOfFirstPage(); |
1030 } | 1030 } |
1031 | 1031 |
1032 Page* p = heap()->isolate()->memory_allocator()->AllocatePage( | 1032 Page* p = heap()->isolate()->memory_allocator()->AllocatePage( |
1033 size, this, executable()); | 1033 size, this, executable()); |
1034 if (p == NULL) return false; | 1034 if (p == NULL) return false; |
1035 | 1035 |
1036 ASSERT(Capacity() <= max_capacity_); | 1036 DCHECK(Capacity() <= max_capacity_); |
1037 | 1037 |
1038 p->InsertAfter(anchor_.prev_page()); | 1038 p->InsertAfter(anchor_.prev_page()); |
1039 | 1039 |
1040 return true; | 1040 return true; |
1041 } | 1041 } |
1042 | 1042 |
1043 | 1043 |
1044 intptr_t PagedSpace::SizeOfFirstPage() { | 1044 intptr_t PagedSpace::SizeOfFirstPage() { |
1045 int size = 0; | 1045 int size = 0; |
1046 switch (identity()) { | 1046 switch (identity()) { |
(...skipping 60 matching lines...) |
1107 } | 1107 } |
1108 } | 1108 } |
1109 | 1109 |
1110 | 1110 |
1111 void PagedSpace::IncreaseCapacity(int size) { | 1111 void PagedSpace::IncreaseCapacity(int size) { |
1112 accounting_stats_.ExpandSpace(size); | 1112 accounting_stats_.ExpandSpace(size); |
1113 } | 1113 } |
1114 | 1114 |
1115 | 1115 |
1116 void PagedSpace::ReleasePage(Page* page) { | 1116 void PagedSpace::ReleasePage(Page* page) { |
1117 ASSERT(page->LiveBytes() == 0); | 1117 DCHECK(page->LiveBytes() == 0); |
1118 ASSERT(AreaSize() == page->area_size()); | 1118 DCHECK(AreaSize() == page->area_size()); |
1119 | 1119 |
1120 if (page->WasSwept()) { | 1120 if (page->WasSwept()) { |
1121 intptr_t size = free_list_.EvictFreeListItems(page); | 1121 intptr_t size = free_list_.EvictFreeListItems(page); |
1122 accounting_stats_.AllocateBytes(size); | 1122 accounting_stats_.AllocateBytes(size); |
1123 ASSERT_EQ(AreaSize(), static_cast<int>(size)); | 1123 DCHECK_EQ(AreaSize(), static_cast<int>(size)); |
1124 } else { | 1124 } else { |
1125 DecreaseUnsweptFreeBytes(page); | 1125 DecreaseUnsweptFreeBytes(page); |
1126 } | 1126 } |
1127 | 1127 |
1128 if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) { | 1128 if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) { |
1129 heap()->decrement_scan_on_scavenge_pages(); | 1129 heap()->decrement_scan_on_scavenge_pages(); |
1130 page->ClearFlag(MemoryChunk::SCAN_ON_SCAVENGE); | 1130 page->ClearFlag(MemoryChunk::SCAN_ON_SCAVENGE); |
1131 } | 1131 } |
1132 | 1132 |
1133 ASSERT(!free_list_.ContainsPageFreeListItems(page)); | 1133 DCHECK(!free_list_.ContainsPageFreeListItems(page)); |
1134 | 1134 |
1135 if (Page::FromAllocationTop(allocation_info_.top()) == page) { | 1135 if (Page::FromAllocationTop(allocation_info_.top()) == page) { |
1136 allocation_info_.set_top(NULL); | 1136 allocation_info_.set_top(NULL); |
1137 allocation_info_.set_limit(NULL); | 1137 allocation_info_.set_limit(NULL); |
1138 } | 1138 } |
1139 | 1139 |
1140 page->Unlink(); | 1140 page->Unlink(); |
1141 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) { | 1141 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) { |
1142 heap()->isolate()->memory_allocator()->Free(page); | 1142 heap()->isolate()->memory_allocator()->Free(page); |
1143 } else { | 1143 } else { |
1144 heap()->QueueMemoryChunkForFree(page); | 1144 heap()->QueueMemoryChunkForFree(page); |
1145 } | 1145 } |
1146 | 1146 |
1147 ASSERT(Capacity() > 0); | 1147 DCHECK(Capacity() > 0); |
1148 accounting_stats_.ShrinkSpace(AreaSize()); | 1148 accounting_stats_.ShrinkSpace(AreaSize()); |
1149 } | 1149 } |
1150 | 1150 |
1151 | 1151 |
1152 void PagedSpace::CreateEmergencyMemory() { | 1152 void PagedSpace::CreateEmergencyMemory() { |
1153 emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk( | 1153 emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk( |
1154 AreaSize(), AreaSize(), executable(), this); | 1154 AreaSize(), AreaSize(), executable(), this); |
1155 } | 1155 } |
1156 | 1156 |
1157 | 1157 |
1158 void PagedSpace::FreeEmergencyMemory() { | 1158 void PagedSpace::FreeEmergencyMemory() { |
1159 Page* page = static_cast<Page*>(emergency_memory_); | 1159 Page* page = static_cast<Page*>(emergency_memory_); |
1160 ASSERT(page->LiveBytes() == 0); | 1160 DCHECK(page->LiveBytes() == 0); |
1161 ASSERT(AreaSize() == page->area_size()); | 1161 DCHECK(AreaSize() == page->area_size()); |
1162 ASSERT(!free_list_.ContainsPageFreeListItems(page)); | 1162 DCHECK(!free_list_.ContainsPageFreeListItems(page)); |
1163 heap()->isolate()->memory_allocator()->Free(page); | 1163 heap()->isolate()->memory_allocator()->Free(page); |
1164 emergency_memory_ = NULL; | 1164 emergency_memory_ = NULL; |
1165 } | 1165 } |
1166 | 1166 |
1167 | 1167 |
1168 void PagedSpace::UseEmergencyMemory() { | 1168 void PagedSpace::UseEmergencyMemory() { |
1169 Page* page = Page::Initialize(heap(), emergency_memory_, executable(), this); | 1169 Page* page = Page::Initialize(heap(), emergency_memory_, executable(), this); |
1170 page->InsertAfter(anchor_.prev_page()); | 1170 page->InsertAfter(anchor_.prev_page()); |
1171 emergency_memory_ = NULL; | 1171 emergency_memory_ = NULL; |
1172 } | 1172 } |
(...skipping 68 matching lines...) |
1241 size_t size = 2 * reserved_semispace_capacity; | 1241 size_t size = 2 * reserved_semispace_capacity; |
1242 Address base = | 1242 Address base = |
1243 heap()->isolate()->memory_allocator()->ReserveAlignedMemory( | 1243 heap()->isolate()->memory_allocator()->ReserveAlignedMemory( |
1244 size, size, &reservation_); | 1244 size, size, &reservation_); |
1245 if (base == NULL) return false; | 1245 if (base == NULL) return false; |
1246 | 1246 |
1247 chunk_base_ = base; | 1247 chunk_base_ = base; |
1248 chunk_size_ = static_cast<uintptr_t>(size); | 1248 chunk_size_ = static_cast<uintptr_t>(size); |
1249 LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_)); | 1249 LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_)); |
1250 | 1250 |
1251 ASSERT(initial_semispace_capacity <= maximum_semispace_capacity); | 1251 DCHECK(initial_semispace_capacity <= maximum_semispace_capacity); |
1252 ASSERT(IsPowerOf2(maximum_semispace_capacity)); | 1252 DCHECK(IsPowerOf2(maximum_semispace_capacity)); |
1253 | 1253 |
1254 // Allocate and set up the histogram arrays if necessary. | 1254 // Allocate and set up the histogram arrays if necessary. |
1255 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); | 1255 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); |
1256 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); | 1256 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); |
1257 | 1257 |
1258 #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \ | 1258 #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \ |
1259 promoted_histogram_[name].set_name(#name); | 1259 promoted_histogram_[name].set_name(#name); |
1260 INSTANCE_TYPE_LIST(SET_NAME) | 1260 INSTANCE_TYPE_LIST(SET_NAME) |
1261 #undef SET_NAME | 1261 #undef SET_NAME |
1262 | 1262 |
1263 ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize()); | 1263 DCHECK(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize()); |
1264 ASSERT(static_cast<intptr_t>(chunk_size_) >= | 1264 DCHECK(static_cast<intptr_t>(chunk_size_) >= |
1265 2 * heap()->ReservedSemiSpaceSize()); | 1265 2 * heap()->ReservedSemiSpaceSize()); |
1266 ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0)); | 1266 DCHECK(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0)); |
1267 | 1267 |
1268 to_space_.SetUp(chunk_base_, | 1268 to_space_.SetUp(chunk_base_, |
1269 initial_semispace_capacity, | 1269 initial_semispace_capacity, |
1270 maximum_semispace_capacity); | 1270 maximum_semispace_capacity); |
1271 from_space_.SetUp(chunk_base_ + reserved_semispace_capacity, | 1271 from_space_.SetUp(chunk_base_ + reserved_semispace_capacity, |
1272 initial_semispace_capacity, | 1272 initial_semispace_capacity, |
1273 maximum_semispace_capacity); | 1273 maximum_semispace_capacity); |
1274 if (!to_space_.Commit()) { | 1274 if (!to_space_.Commit()) { |
1275 return false; | 1275 return false; |
1276 } | 1276 } |
1277 ASSERT(!from_space_.is_committed()); // No need to use memory yet. | 1277 DCHECK(!from_space_.is_committed()); // No need to use memory yet. |
1278 | 1278 |
1279 start_ = chunk_base_; | 1279 start_ = chunk_base_; |
1280 address_mask_ = ~(2 * reserved_semispace_capacity - 1); | 1280 address_mask_ = ~(2 * reserved_semispace_capacity - 1); |
1281 object_mask_ = address_mask_ | kHeapObjectTagMask; | 1281 object_mask_ = address_mask_ | kHeapObjectTagMask; |
1282 object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag; | 1282 object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag; |
1283 | 1283 |
1284 ResetAllocationInfo(); | 1284 ResetAllocationInfo(); |
1285 | 1285 |
1286 return true; | 1286 return true; |
1287 } | 1287 } |
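
Note: the address_mask_ computed here enables a one-instruction containment check — the 2 * capacity reservation is aligned to its own (power-of-two) size, so masking off the low bits of any address inside it recovers the chunk base. Standalone:

    #include <cassert>
    #include <cstdint>

    // two_capacity must be a power of two and `start` aligned to it,
    // which the DCHECK(IsAddressAligned(...)) above guarantees.
    bool InChunk(uintptr_t addr, uintptr_t start, uintptr_t two_capacity) {
      uintptr_t address_mask = ~(two_capacity - 1);
      return (addr & address_mask) == start;
    }

    int main() {
      assert(InChunk(0x40001234, 0x40000000, 0x1000000));   // inside 16 MB
      assert(!InChunk(0x41001234, 0x40000000, 0x1000000));  // one chunk over
    }
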
(...skipping 11 matching lines...) |
1299 | 1299 |
1300 start_ = NULL; | 1300 start_ = NULL; |
1301 allocation_info_.set_top(NULL); | 1301 allocation_info_.set_top(NULL); |
1302 allocation_info_.set_limit(NULL); | 1302 allocation_info_.set_limit(NULL); |
1303 | 1303 |
1304 to_space_.TearDown(); | 1304 to_space_.TearDown(); |
1305 from_space_.TearDown(); | 1305 from_space_.TearDown(); |
1306 | 1306 |
1307 LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_)); | 1307 LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_)); |
1308 | 1308 |
1309 ASSERT(reservation_.IsReserved()); | 1309 DCHECK(reservation_.IsReserved()); |
1310 heap()->isolate()->memory_allocator()->FreeMemory(&reservation_, | 1310 heap()->isolate()->memory_allocator()->FreeMemory(&reservation_, |
1311 NOT_EXECUTABLE); | 1311 NOT_EXECUTABLE); |
1312 chunk_base_ = NULL; | 1312 chunk_base_ = NULL; |
1313 chunk_size_ = 0; | 1313 chunk_size_ = 0; |
1314 } | 1314 } |
1315 | 1315 |
1316 | 1316 |
1317 void NewSpace::Flip() { | 1317 void NewSpace::Flip() { |
1318 SemiSpace::Swap(&from_space_, &to_space_); | 1318 SemiSpace::Swap(&from_space_, &to_space_); |
1319 } | 1319 } |
1320 | 1320 |
1321 | 1321 |
1322 void NewSpace::Grow() { | 1322 void NewSpace::Grow() { |
1323 // Double the semispace size but only up to maximum capacity. | 1323 // Double the semispace size but only up to maximum capacity. |
1324 ASSERT(Capacity() < MaximumCapacity()); | 1324 DCHECK(Capacity() < MaximumCapacity()); |
1325 int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity())); | 1325 int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity())); |
1326 if (to_space_.GrowTo(new_capacity)) { | 1326 if (to_space_.GrowTo(new_capacity)) { |
1327 // Only grow from space if we managed to grow to-space. | 1327 // Only grow from space if we managed to grow to-space. |
1328 if (!from_space_.GrowTo(new_capacity)) { | 1328 if (!from_space_.GrowTo(new_capacity)) { |
1329 // If we managed to grow to-space but couldn't grow from-space, | 1329 // If we managed to grow to-space but couldn't grow from-space, |
1330 // attempt to shrink to-space. | 1330 // attempt to shrink to-space. |
1331 if (!to_space_.ShrinkTo(from_space_.Capacity())) { | 1331 if (!to_space_.ShrinkTo(from_space_.Capacity())) { |
1332 // We are in an inconsistent state because we could not | 1332 // We are in an inconsistent state because we could not |
1333 // commit/uncommit memory from new space. | 1333 // commit/uncommit memory from new space. |
1334 V8::FatalProcessOutOfMemory("Failed to grow new space."); | 1334 V8::FatalProcessOutOfMemory("Failed to grow new space."); |
1335 } | 1335 } |
1336 } | 1336 } |
1337 } | 1337 } |
1338 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); | 1338 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
1339 } | 1339 } |
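
Note: Grow's capacity schedule is simply doubling, clamped to the maximum, and the DCHECK at the top guarantees there is room to grow at all:

    #include <algorithm>

    // Capacity schedule used by Grow(): double, clamped to the maximum.
    int NextCapacity(int capacity, int maximum) {
      return std::min(maximum, 2 * capacity);
    }
    // E.g. 1 MB -> 2 MB -> 4 MB -> 8 MB (== maximum); after that,
    // DCHECK(Capacity() < MaximumCapacity()) forbids further growth.
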
1340 | 1340 |
1341 | 1341 |
1342 void NewSpace::Shrink() { | 1342 void NewSpace::Shrink() { |
1343 int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt()); | 1343 int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt()); |
1344 int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize); | 1344 int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize); |
1345 if (rounded_new_capacity < Capacity() && | 1345 if (rounded_new_capacity < Capacity() && |
1346 to_space_.ShrinkTo(rounded_new_capacity)) { | 1346 to_space_.ShrinkTo(rounded_new_capacity)) { |
1347 // Only shrink from-space if we managed to shrink to-space. | 1347 // Only shrink from-space if we managed to shrink to-space. |
1348 from_space_.Reset(); | 1348 from_space_.Reset(); |
1349 if (!from_space_.ShrinkTo(rounded_new_capacity)) { | 1349 if (!from_space_.ShrinkTo(rounded_new_capacity)) { |
1350 // If we managed to shrink to-space but couldn't shrink from | 1350 // If we managed to shrink to-space but couldn't shrink from |
1351 // space, attempt to grow to-space again. | 1351 // space, attempt to grow to-space again. |
1352 if (!to_space_.GrowTo(from_space_.Capacity())) { | 1352 if (!to_space_.GrowTo(from_space_.Capacity())) { |
1353 // We are in an inconsistent state because we could not | 1353 // We are in an inconsistent state because we could not |
1354 // commit/uncommit memory from new space. | 1354 // commit/uncommit memory from new space. |
1355 V8::FatalProcessOutOfMemory("Failed to shrink new space."); | 1355 V8::FatalProcessOutOfMemory("Failed to shrink new space."); |
1356 } | 1356 } |
1357 } | 1357 } |
1358 } | 1358 } |
1359 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); | 1359 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
1360 } | 1360 } |
1361 | 1361 |
1362 | 1362 |
1363 void NewSpace::UpdateAllocationInfo() { | 1363 void NewSpace::UpdateAllocationInfo() { |
1364 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); | 1364 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); |
1365 allocation_info_.set_top(to_space_.page_low()); | 1365 allocation_info_.set_top(to_space_.page_low()); |
1366 allocation_info_.set_limit(to_space_.page_high()); | 1366 allocation_info_.set_limit(to_space_.page_high()); |
1367 UpdateInlineAllocationLimit(0); | 1367 UpdateInlineAllocationLimit(0); |
1368 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); | 1368 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
1369 } | 1369 } |
1370 | 1370 |
1371 | 1371 |
1372 void NewSpace::ResetAllocationInfo() { | 1372 void NewSpace::ResetAllocationInfo() { |
1373 to_space_.Reset(); | 1373 to_space_.Reset(); |
1374 UpdateAllocationInfo(); | 1374 UpdateAllocationInfo(); |
1375 pages_used_ = 0; | 1375 pages_used_ = 0; |
1376 // Clear all mark-bits in the to-space. | 1376 // Clear all mark-bits in the to-space. |
1377 NewSpacePageIterator it(&to_space_); | 1377 NewSpacePageIterator it(&to_space_); |
1378 while (it.has_next()) { | 1378 while (it.has_next()) { |
(...skipping 11 matching lines...) |
1390 } else if (inline_allocation_limit_step() == 0) { | 1390 } else if (inline_allocation_limit_step() == 0) { |
1391 // Normal limit is the end of the current page. | 1391 // Normal limit is the end of the current page. |
1392 allocation_info_.set_limit(to_space_.page_high()); | 1392 allocation_info_.set_limit(to_space_.page_high()); |
1393 } else { | 1393 } else { |
1394 // Lower limit during incremental marking. | 1394 // Lower limit during incremental marking. |
1395 Address high = to_space_.page_high(); | 1395 Address high = to_space_.page_high(); |
1396 Address new_top = allocation_info_.top() + size_in_bytes; | 1396 Address new_top = allocation_info_.top() + size_in_bytes; |
1397 Address new_limit = new_top + inline_allocation_limit_step_; | 1397 Address new_limit = new_top + inline_allocation_limit_step_; |
1398 allocation_info_.set_limit(Min(new_limit, high)); | 1398 allocation_info_.set_limit(Min(new_limit, high)); |
1399 } | 1399 } |
1400 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); | 1400 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
1401 } | 1401 } |
1402 | 1402 |
1403 | 1403 |
1404 bool NewSpace::AddFreshPage() { | 1404 bool NewSpace::AddFreshPage() { |
1405 Address top = allocation_info_.top(); | 1405 Address top = allocation_info_.top(); |
1406 if (NewSpacePage::IsAtStart(top)) { | 1406 if (NewSpacePage::IsAtStart(top)) { |
1407 // The current page is already empty. Don't try to make another. | 1407 // The current page is already empty. Don't try to make another. |
1408 | 1408 |
1409 // We should only get here if someone asks to allocate more | 1409 // We should only get here if someone asks to allocate more |
1410 // than what can be stored in a single page. | 1410 // than what can be stored in a single page. |
(...skipping 47 matching lines...) |
1458 return AllocationResult::Retry(); | 1458 return AllocationResult::Retry(); |
1459 } | 1459 } |
1460 } | 1460 } |
1461 | 1461 |
1462 | 1462 |
1463 #ifdef VERIFY_HEAP | 1463 #ifdef VERIFY_HEAP |
1464 // We do not use the SemiSpaceIterator because verification doesn't assume | 1464 // We do not use the SemiSpaceIterator because verification doesn't assume |
1465 // that it works (it depends on the invariants we are checking). | 1465 // that it works (it depends on the invariants we are checking). |
1466 void NewSpace::Verify() { | 1466 void NewSpace::Verify() { |
1467 // The allocation pointer should be in the space or at the very end. | 1467 // The allocation pointer should be in the space or at the very end. |
1468 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); | 1468 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
1469 | 1469 |
1470 // There should be objects packed in from the low address up to the | 1470 // There should be objects packed in from the low address up to the |
1471 // allocation pointer. | 1471 // allocation pointer. |
1472 Address current = to_space_.first_page()->area_start(); | 1472 Address current = to_space_.first_page()->area_start(); |
1473 CHECK_EQ(current, to_space_.space_start()); | 1473 CHECK_EQ(current, to_space_.space_start()); |
1474 | 1474 |
1475 while (current != top()) { | 1475 while (current != top()) { |
1476 if (!NewSpacePage::IsAtEnd(current)) { | 1476 if (!NewSpacePage::IsAtEnd(current)) { |
1477 // The allocation pointer should not be in the middle of an object. | 1477 // The allocation pointer should not be in the middle of an object. |
1478 CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) || | 1478 CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) || |
(...skipping 42 matching lines...) |
1521 | 1521 |
1522 void SemiSpace::SetUp(Address start, | 1522 void SemiSpace::SetUp(Address start, |
1523 int initial_capacity, | 1523 int initial_capacity, |
1524 int maximum_capacity) { | 1524 int maximum_capacity) { |
1525 // Creates a space in the young generation. The constructor does not | 1525 // Creates a space in the young generation. The constructor does not |
1526 // allocate memory from the OS. A SemiSpace is given a contiguous chunk of | 1526 // allocate memory from the OS. A SemiSpace is given a contiguous chunk of |
1527 // memory of size 'capacity' when set up, and does not grow or shrink | 1527 // memory of size 'capacity' when set up, and does not grow or shrink |
1528 // otherwise. In the mark-compact collector, the memory region of the from | 1528 // otherwise. In the mark-compact collector, the memory region of the from |
1529 // space is used as the marking stack. It requires contiguous memory | 1529 // space is used as the marking stack. It requires contiguous memory |
1530 // addresses. | 1530 // addresses. |
1531 ASSERT(maximum_capacity >= Page::kPageSize); | 1531 DCHECK(maximum_capacity >= Page::kPageSize); |
1532 initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize); | 1532 initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize); |
1533 capacity_ = initial_capacity; | 1533 capacity_ = initial_capacity; |
1534 maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize); | 1534 maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize); |
1535 maximum_committed_ = 0; | 1535 maximum_committed_ = 0; |
1536 committed_ = false; | 1536 committed_ = false; |
1537 start_ = start; | 1537 start_ = start; |
1538 address_mask_ = ~(maximum_capacity - 1); | 1538 address_mask_ = ~(maximum_capacity - 1); |
1539 object_mask_ = address_mask_ | kHeapObjectTagMask; | 1539 object_mask_ = address_mask_ | kHeapObjectTagMask; |
1540 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag; | 1540 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag; |
1541 age_mark_ = start_; | 1541 age_mark_ = start_; |
1542 } | 1542 } |
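
Aside: the mask setup above makes semispace containment a single AND and compare. A self-contained sketch, assuming V8's usual pointer tagging (kHeapObjectTag == 1, kHeapObjectTagMask == 3) and a hypothetical, suitably aligned base address:

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kHeapObjectTag = 1, kHeapObjectTagMask = 3;
  const uintptr_t start = 0x40000000;            // Hypothetical aligned base.
  const uintptr_t maximum_capacity = 1 << 22;    // 4 MB, a power of two.

  uintptr_t address_mask = ~(maximum_capacity - 1);
  uintptr_t object_mask = address_mask | kHeapObjectTagMask;
  uintptr_t object_expected = start | kHeapObjectTag;

  uintptr_t tagged_inside = (start + 0x1230) | kHeapObjectTag;
  uintptr_t tagged_outside = (start + maximum_capacity + 8) | kHeapObjectTag;
  assert((tagged_inside & object_mask) == object_expected);    // In space.
  assert((tagged_outside & object_mask) != object_expected);   // Not in it.
  return 0;
}
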
1543 | 1543 |
1544 | 1544 |
1545 void SemiSpace::TearDown() { | 1545 void SemiSpace::TearDown() { |
1546 start_ = NULL; | 1546 start_ = NULL; |
1547 capacity_ = 0; | 1547 capacity_ = 0; |
1548 } | 1548 } |
1549 | 1549 |
1550 | 1550 |
1551 bool SemiSpace::Commit() { | 1551 bool SemiSpace::Commit() { |
1552 ASSERT(!is_committed()); | 1552 DCHECK(!is_committed()); |
1553 int pages = capacity_ / Page::kPageSize; | 1553 int pages = capacity_ / Page::kPageSize; |
1554 if (!heap()->isolate()->memory_allocator()->CommitBlock(start_, | 1554 if (!heap()->isolate()->memory_allocator()->CommitBlock(start_, |
1555 capacity_, | 1555 capacity_, |
1556 executable())) { | 1556 executable())) { |
1557 return false; | 1557 return false; |
1558 } | 1558 } |
1559 | 1559 |
1560 NewSpacePage* current = anchor(); | 1560 NewSpacePage* current = anchor(); |
1561 for (int i = 0; i < pages; i++) { | 1561 for (int i = 0; i < pages; i++) { |
1562 NewSpacePage* new_page = | 1562 NewSpacePage* new_page = |
1563 NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this); | 1563 NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this); |
1564 new_page->InsertAfter(current); | 1564 new_page->InsertAfter(current); |
1565 current = new_page; | 1565 current = new_page; |
1566 } | 1566 } |
1567 | 1567 |
1568 SetCapacity(capacity_); | 1568 SetCapacity(capacity_); |
1569 committed_ = true; | 1569 committed_ = true; |
1570 Reset(); | 1570 Reset(); |
1571 return true; | 1571 return true; |
1572 } | 1572 } |
1573 | 1573 |
1574 | 1574 |
1575 bool SemiSpace::Uncommit() { | 1575 bool SemiSpace::Uncommit() { |
1576 ASSERT(is_committed()); | 1576 DCHECK(is_committed()); |
1577 Address start = start_ + maximum_capacity_ - capacity_; | 1577 Address start = start_ + maximum_capacity_ - capacity_; |
1578 if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) { | 1578 if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) { |
1579 return false; | 1579 return false; |
1580 } | 1580 } |
1581 anchor()->set_next_page(anchor()); | 1581 anchor()->set_next_page(anchor()); |
1582 anchor()->set_prev_page(anchor()); | 1582 anchor()->set_prev_page(anchor()); |
1583 | 1583 |
1584 committed_ = false; | 1584 committed_ = false; |
1585 return true; | 1585 return true; |
1586 } | 1586 } |
1587 | 1587 |
1588 | 1588 |
1589 size_t SemiSpace::CommittedPhysicalMemory() { | 1589 size_t SemiSpace::CommittedPhysicalMemory() { |
1590 if (!is_committed()) return 0; | 1590 if (!is_committed()) return 0; |
1591 size_t size = 0; | 1591 size_t size = 0; |
1592 NewSpacePageIterator it(this); | 1592 NewSpacePageIterator it(this); |
1593 while (it.has_next()) { | 1593 while (it.has_next()) { |
1594 size += it.next()->CommittedPhysicalMemory(); | 1594 size += it.next()->CommittedPhysicalMemory(); |
1595 } | 1595 } |
1596 return size; | 1596 return size; |
1597 } | 1597 } |
1598 | 1598 |
1599 | 1599 |
1600 bool SemiSpace::GrowTo(int new_capacity) { | 1600 bool SemiSpace::GrowTo(int new_capacity) { |
1601 if (!is_committed()) { | 1601 if (!is_committed()) { |
1602 if (!Commit()) return false; | 1602 if (!Commit()) return false; |
1603 } | 1603 } |
1604 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0); | 1604 DCHECK((new_capacity & Page::kPageAlignmentMask) == 0); |
1605 ASSERT(new_capacity <= maximum_capacity_); | 1605 DCHECK(new_capacity <= maximum_capacity_); |
1606 ASSERT(new_capacity > capacity_); | 1606 DCHECK(new_capacity > capacity_); |
1607 int pages_before = capacity_ / Page::kPageSize; | 1607 int pages_before = capacity_ / Page::kPageSize; |
1608 int pages_after = new_capacity / Page::kPageSize; | 1608 int pages_after = new_capacity / Page::kPageSize; |
1609 | 1609 |
1610 size_t delta = new_capacity - capacity_; | 1610 size_t delta = new_capacity - capacity_; |
1611 | 1611 |
1612 ASSERT(IsAligned(delta, base::OS::AllocateAlignment())); | 1612 DCHECK(IsAligned(delta, base::OS::AllocateAlignment())); |
1613 if (!heap()->isolate()->memory_allocator()->CommitBlock( | 1613 if (!heap()->isolate()->memory_allocator()->CommitBlock( |
1614 start_ + capacity_, delta, executable())) { | 1614 start_ + capacity_, delta, executable())) { |
1615 return false; | 1615 return false; |
1616 } | 1616 } |
1617 SetCapacity(new_capacity); | 1617 SetCapacity(new_capacity); |
1618 NewSpacePage* last_page = anchor()->prev_page(); | 1618 NewSpacePage* last_page = anchor()->prev_page(); |
1619 ASSERT(last_page != anchor()); | 1619 DCHECK(last_page != anchor()); |
1620 for (int i = pages_before; i < pages_after; i++) { | 1620 for (int i = pages_before; i < pages_after; i++) { |
1621 Address page_address = start_ + i * Page::kPageSize; | 1621 Address page_address = start_ + i * Page::kPageSize; |
1622 NewSpacePage* new_page = NewSpacePage::Initialize(heap(), | 1622 NewSpacePage* new_page = NewSpacePage::Initialize(heap(), |
1623 page_address, | 1623 page_address, |
1624 this); | 1624 this); |
1625 new_page->InsertAfter(last_page); | 1625 new_page->InsertAfter(last_page); |
1626 Bitmap::Clear(new_page); | 1626 Bitmap::Clear(new_page); |
1627 // Duplicate the flags that were set on the old page. | 1627 // Duplicate the flags that were set on the old page. |
1628 new_page->SetFlags(last_page->GetFlags(), | 1628 new_page->SetFlags(last_page->GetFlags(), |
1629 NewSpacePage::kCopyOnFlipFlagsMask); | 1629 NewSpacePage::kCopyOnFlipFlagsMask); |
1630 last_page = new_page; | 1630 last_page = new_page; |
1631 } | 1631 } |
1632 return true; | 1632 return true; |
1633 } | 1633 } |
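
Aside: the page arithmetic in GrowTo, modeled standalone; the 1 MB page size and the concrete capacities here are assumptions for illustration, not asserted V8 constants.

#include <cassert>
#include <cstddef>

int main() {
  const int kPageSize = 1 << 20;                 // Assumed 1 MB pages.
  int capacity = 2 * kPageSize;                  // Committed today.
  int new_capacity = 5 * kPageSize;              // Requested, page-aligned.

  int pages_before = capacity / kPageSize;       // 2 pages already threaded.
  int pages_after = new_capacity / kPageSize;    // 5 pages afterwards.
  size_t delta = new_capacity - capacity;        // Bytes to commit: 3 MB.
  assert(delta == size_t(pages_after - pages_before) * kPageSize);
  // Pages i = 2..4 get headers at start_ + i * kPageSize and are inserted
  // after the old last page, inheriting its copy-on-flip flags.
  return 0;
}
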
1634 | 1634 |
1635 | 1635 |
1636 bool SemiSpace::ShrinkTo(int new_capacity) { | 1636 bool SemiSpace::ShrinkTo(int new_capacity) { |
1637 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0); | 1637 DCHECK((new_capacity & Page::kPageAlignmentMask) == 0); |
1638 ASSERT(new_capacity >= initial_capacity_); | 1638 DCHECK(new_capacity >= initial_capacity_); |
1639 ASSERT(new_capacity < capacity_); | 1639 DCHECK(new_capacity < capacity_); |
1640 if (is_committed()) { | 1640 if (is_committed()) { |
1641 size_t delta = capacity_ - new_capacity; | 1641 size_t delta = capacity_ - new_capacity; |
1642 ASSERT(IsAligned(delta, base::OS::AllocateAlignment())); | 1642 DCHECK(IsAligned(delta, base::OS::AllocateAlignment())); |
1643 | 1643 |
1644 MemoryAllocator* allocator = heap()->isolate()->memory_allocator(); | 1644 MemoryAllocator* allocator = heap()->isolate()->memory_allocator(); |
1645 if (!allocator->UncommitBlock(start_ + new_capacity, delta)) { | 1645 if (!allocator->UncommitBlock(start_ + new_capacity, delta)) { |
1646 return false; | 1646 return false; |
1647 } | 1647 } |
1648 | 1648 |
1649 int pages_after = new_capacity / Page::kPageSize; | 1649 int pages_after = new_capacity / Page::kPageSize; |
1650 NewSpacePage* new_last_page = | 1650 NewSpacePage* new_last_page = |
1651 NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize); | 1651 NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize); |
1652 new_last_page->set_next_page(anchor()); | 1652 new_last_page->set_next_page(anchor()); |
1653 anchor()->set_prev_page(new_last_page); | 1653 anchor()->set_prev_page(new_last_page); |
1654 ASSERT((current_page_ >= first_page()) && (current_page_ <= new_last_page)); | 1654 DCHECK((current_page_ >= first_page()) && (current_page_ <= new_last_page)); |
1655 } | 1655 } |
1656 | 1656 |
1657 SetCapacity(new_capacity); | 1657 SetCapacity(new_capacity); |
1658 | 1658 |
1659 return true; | 1659 return true; |
1660 } | 1660 } |
1661 | 1661 |
1662 | 1662 |
1663 void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) { | 1663 void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) { |
1664 anchor_.set_owner(this); | 1664 anchor_.set_owner(this); |
(...skipping 10 matching lines...) |
1675 page->SetFlags(flags, mask); | 1675 page->SetFlags(flags, mask); |
1676 if (becomes_to_space) { | 1676 if (becomes_to_space) { |
1677 page->ClearFlag(MemoryChunk::IN_FROM_SPACE); | 1677 page->ClearFlag(MemoryChunk::IN_FROM_SPACE); |
1678 page->SetFlag(MemoryChunk::IN_TO_SPACE); | 1678 page->SetFlag(MemoryChunk::IN_TO_SPACE); |
1679 page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK); | 1679 page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK); |
1680 page->ResetLiveBytes(); | 1680 page->ResetLiveBytes(); |
1681 } else { | 1681 } else { |
1682 page->SetFlag(MemoryChunk::IN_FROM_SPACE); | 1682 page->SetFlag(MemoryChunk::IN_FROM_SPACE); |
1683 page->ClearFlag(MemoryChunk::IN_TO_SPACE); | 1683 page->ClearFlag(MemoryChunk::IN_TO_SPACE); |
1684 } | 1684 } |
1685 ASSERT(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)); | 1685 DCHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)); |
1686 ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) || | 1686 DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) || |
1687 page->IsFlagSet(MemoryChunk::IN_FROM_SPACE)); | 1687 page->IsFlagSet(MemoryChunk::IN_FROM_SPACE)); |
1688 page = page->next_page(); | 1688 page = page->next_page(); |
1689 } | 1689 } |
1690 } | 1690 } |
1691 | 1691 |
1692 | 1692 |
1693 void SemiSpace::Reset() { | 1693 void SemiSpace::Reset() { |
1694 ASSERT(anchor_.next_page() != &anchor_); | 1694 DCHECK(anchor_.next_page() != &anchor_); |
1695 current_page_ = anchor_.next_page(); | 1695 current_page_ = anchor_.next_page(); |
1696 } | 1696 } |
1697 | 1697 |
1698 | 1698 |
1699 void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) { | 1699 void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) { |
1700 // We won't be swapping semispaces without data in them. | 1700 // We won't be swapping semispaces without data in them. |
1701 ASSERT(from->anchor_.next_page() != &from->anchor_); | 1701 DCHECK(from->anchor_.next_page() != &from->anchor_); |
1702 ASSERT(to->anchor_.next_page() != &to->anchor_); | 1702 DCHECK(to->anchor_.next_page() != &to->anchor_); |
1703 | 1703 |
1704 // Swap bits. | 1704 // Swap bits. |
1705 SemiSpace tmp = *from; | 1705 SemiSpace tmp = *from; |
1706 *from = *to; | 1706 *from = *to; |
1707 *to = tmp; | 1707 *to = tmp; |
1708 | 1708 |
1709 // Fix up back-pointers to the page list anchor now that its address | 1709 // Fix up back-pointers to the page list anchor now that its address |
1710 // has changed. | 1710 // has changed. |
1711 // Swap to/from-space bits on pages. | 1711 // Swap to/from-space bits on pages. |
1712 // Copy GC flags from old active space (from-space) to new (to-space). | 1712 // Copy GC flags from old active space (from-space) to new (to-space). |
1713 intptr_t flags = from->current_page()->GetFlags(); | 1713 intptr_t flags = from->current_page()->GetFlags(); |
1714 to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask); | 1714 to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask); |
1715 | 1715 |
1716 from->FlipPages(0, 0); | 1716 from->FlipPages(0, 0); |
1717 } | 1717 } |
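
Aside: why the page fixup is needed at all. The page list is anchored inside the SemiSpace object itself, so a by-value swap leaves each anchor's links pointing into the other space. A minimal model with plain structs (not V8 types):

#include <cassert>
#include <utility>

struct Page { Page* next; Page* prev; };
struct Space { Page anchor; int id; };

int main() {
  Space a{{&a.anchor, &a.anchor}, 1};  // Empty circular page lists: each
  Space b{{&b.anchor, &b.anchor}, 2};  // anchor links back to itself.
  std::swap(a, b);                     // Shallow copy, like *from = *to above.
  // Each anchor was copied by value, so its links still point at the other
  // object's anchor; this is the stale state FlipPages() has to repair.
  assert(a.anchor.next == &b.anchor && b.anchor.next == &a.anchor);
  return 0;
}
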
1718 | 1718 |
1719 | 1719 |
1720 void SemiSpace::SetCapacity(int new_capacity) { | 1720 void SemiSpace::SetCapacity(int new_capacity) { |
1721 capacity_ = new_capacity; | 1721 capacity_ = new_capacity; |
1722 if (capacity_ > maximum_committed_) { | 1722 if (capacity_ > maximum_committed_) { |
1723 maximum_committed_ = capacity_; | 1723 maximum_committed_ = capacity_; |
1724 } | 1724 } |
1725 } | 1725 } |
1726 | 1726 |
1727 | 1727 |
1728 void SemiSpace::set_age_mark(Address mark) { | 1728 void SemiSpace::set_age_mark(Address mark) { |
1729 ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this); | 1729 DCHECK(NewSpacePage::FromLimit(mark)->semi_space() == this); |
1730 age_mark_ = mark; | 1730 age_mark_ = mark; |
1731 // Mark all pages up to the one containing mark. | 1731 // Mark all pages up to the one containing mark. |
1732 NewSpacePageIterator it(space_start(), mark); | 1732 NewSpacePageIterator it(space_start(), mark); |
1733 while (it.has_next()) { | 1733 while (it.has_next()) { |
1734 it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK); | 1734 it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK); |
1735 } | 1735 } |
1736 } | 1736 } |
1737 | 1737 |
1738 | 1738 |
1739 #ifdef DEBUG | 1739 #ifdef DEBUG |
(...skipping 119 matching lines...) |
1859 code_kind_statistics[i]); | 1859 code_kind_statistics[i]); |
1860 } | 1860 } |
1861 } | 1861 } |
1862 PrintF("\n"); | 1862 PrintF("\n"); |
1863 } | 1863 } |
1864 | 1864 |
1865 | 1865 |
1866 static int CollectHistogramInfo(HeapObject* obj) { | 1866 static int CollectHistogramInfo(HeapObject* obj) { |
1867 Isolate* isolate = obj->GetIsolate(); | 1867 Isolate* isolate = obj->GetIsolate(); |
1868 InstanceType type = obj->map()->instance_type(); | 1868 InstanceType type = obj->map()->instance_type(); |
1869 ASSERT(0 <= type && type <= LAST_TYPE); | 1869 DCHECK(0 <= type && type <= LAST_TYPE); |
1870 ASSERT(isolate->heap_histograms()[type].name() != NULL); | 1870 DCHECK(isolate->heap_histograms()[type].name() != NULL); |
1871 isolate->heap_histograms()[type].increment_number(1); | 1871 isolate->heap_histograms()[type].increment_number(1); |
1872 isolate->heap_histograms()[type].increment_bytes(obj->Size()); | 1872 isolate->heap_histograms()[type].increment_bytes(obj->Size()); |
1873 | 1873 |
1874 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) { | 1874 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) { |
1875 JSObject::cast(obj)->IncrementSpillStatistics( | 1875 JSObject::cast(obj)->IncrementSpillStatistics( |
1876 isolate->js_spill_information()); | 1876 isolate->js_spill_information()); |
1877 } | 1877 } |
1878 | 1878 |
1879 return obj->Size(); | 1879 return obj->Size(); |
1880 } | 1880 } |
(...skipping 102 matching lines...) |
1983 if (FLAG_log_gc) { | 1983 if (FLAG_log_gc) { |
1984 Isolate* isolate = heap()->isolate(); | 1984 Isolate* isolate = heap()->isolate(); |
1985 DoReportStatistics(isolate, allocated_histogram_, "allocated"); | 1985 DoReportStatistics(isolate, allocated_histogram_, "allocated"); |
1986 DoReportStatistics(isolate, promoted_histogram_, "promoted"); | 1986 DoReportStatistics(isolate, promoted_histogram_, "promoted"); |
1987 } | 1987 } |
1988 } | 1988 } |
1989 | 1989 |
1990 | 1990 |
1991 void NewSpace::RecordAllocation(HeapObject* obj) { | 1991 void NewSpace::RecordAllocation(HeapObject* obj) { |
1992 InstanceType type = obj->map()->instance_type(); | 1992 InstanceType type = obj->map()->instance_type(); |
1993 ASSERT(0 <= type && type <= LAST_TYPE); | 1993 DCHECK(0 <= type && type <= LAST_TYPE); |
1994 allocated_histogram_[type].increment_number(1); | 1994 allocated_histogram_[type].increment_number(1); |
1995 allocated_histogram_[type].increment_bytes(obj->Size()); | 1995 allocated_histogram_[type].increment_bytes(obj->Size()); |
1996 } | 1996 } |
1997 | 1997 |
1998 | 1998 |
1999 void NewSpace::RecordPromotion(HeapObject* obj) { | 1999 void NewSpace::RecordPromotion(HeapObject* obj) { |
2000 InstanceType type = obj->map()->instance_type(); | 2000 InstanceType type = obj->map()->instance_type(); |
2001 ASSERT(0 <= type && type <= LAST_TYPE); | 2001 DCHECK(0 <= type && type <= LAST_TYPE); |
2002 promoted_histogram_[type].increment_number(1); | 2002 promoted_histogram_[type].increment_number(1); |
2003 promoted_histogram_[type].increment_bytes(obj->Size()); | 2003 promoted_histogram_[type].increment_bytes(obj->Size()); |
2004 } | 2004 } |
2005 | 2005 |
2006 | 2006 |
2007 size_t NewSpace::CommittedPhysicalMemory() { | 2007 size_t NewSpace::CommittedPhysicalMemory() { |
2008 if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory(); | 2008 if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory(); |
2009 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); | 2009 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); |
2010 size_t size = to_space_.CommittedPhysicalMemory(); | 2010 size_t size = to_space_.CommittedPhysicalMemory(); |
2011 if (from_space_.is_committed()) { | 2011 if (from_space_.is_committed()) { |
2012 size += from_space_.CommittedPhysicalMemory(); | 2012 size += from_space_.CommittedPhysicalMemory(); |
2013 } | 2013 } |
2014 return size; | 2014 return size; |
2015 } | 2015 } |
2016 | 2016 |
2017 | 2017 |
2018 // ----------------------------------------------------------------------------- | 2018 // ----------------------------------------------------------------------------- |
2019 // Free lists for old object spaces implementation | 2019 // Free lists for old object spaces implementation |
2020 | 2020 |
2021 void FreeListNode::set_size(Heap* heap, int size_in_bytes) { | 2021 void FreeListNode::set_size(Heap* heap, int size_in_bytes) { |
2022 ASSERT(size_in_bytes > 0); | 2022 DCHECK(size_in_bytes > 0); |
2023 ASSERT(IsAligned(size_in_bytes, kPointerSize)); | 2023 DCHECK(IsAligned(size_in_bytes, kPointerSize)); |
2024 | 2024 |
2025 // We write a map and possibly size information to the block. If the block | 2025 // We write a map and possibly size information to the block. If the block |
2026 // is big enough to be a FreeSpace with at least one extra word (the next | 2026 // is big enough to be a FreeSpace with at least one extra word (the next |
2027 // pointer), we set its map to be the free space map and its size to an | 2027 // pointer), we set its map to be the free space map and its size to an |
2028 // appropriate array length for the desired size from HeapObject::Size(). | 2028 // appropriate array length for the desired size from HeapObject::Size(). |
2029 // If the block is too small (e.g., one or two words) to hold both a size | 2029 // If the block is too small (e.g., one or two words) to hold both a size |
2030 // field and a next pointer, we give it a filler map that encodes the | 2030 // field and a next pointer, we give it a filler map that encodes the |
2031 // correct size. | 2031 // correct size. |
2032 if (size_in_bytes > FreeSpace::kHeaderSize) { | 2032 if (size_in_bytes > FreeSpace::kHeaderSize) { |
2033 // Can't use FreeSpace::cast because it fails during deserialization. | 2033 // Can't use FreeSpace::cast because it fails during deserialization. |
2034 // We have to set the size first with a release store before we store | 2034 // We have to set the size first with a release store before we store |
2035 // the map because a concurrent store buffer scan on scavenge must not | 2035 // the map because a concurrent store buffer scan on scavenge must not |
2036 // observe a map with an invalid size. | 2036 // observe a map with an invalid size. |
2037 FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this); | 2037 FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this); |
2038 this_as_free_space->nobarrier_set_size(size_in_bytes); | 2038 this_as_free_space->nobarrier_set_size(size_in_bytes); |
2039 synchronized_set_map_no_write_barrier(heap->raw_unchecked_free_space_map()); | 2039 synchronized_set_map_no_write_barrier(heap->raw_unchecked_free_space_map()); |
2040 } else if (size_in_bytes == kPointerSize) { | 2040 } else if (size_in_bytes == kPointerSize) { |
2041 set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map()); | 2041 set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map()); |
2042 } else if (size_in_bytes == 2 * kPointerSize) { | 2042 } else if (size_in_bytes == 2 * kPointerSize) { |
2043 set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map()); | 2043 set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map()); |
2044 } else { | 2044 } else { |
2045 UNREACHABLE(); | 2045 UNREACHABLE(); |
2046 } | 2046 } |
2047 // We would like to ASSERT(Size() == size_in_bytes) but this would fail during | 2047 // We would like to DCHECK(Size() == size_in_bytes) but this would fail during |
2048 // deserialization because the free space map is not done yet. | 2048 // deserialization because the free space map is not done yet. |
2049 } | 2049 } |
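
Aside: the three encodings chosen above, reduced to a standalone decision function; the 64-bit word size and the two-word FreeSpace header are assumptions for illustration.

#include <cassert>

enum class FreeBlockKind { kFreeSpace, kOnePointerFiller, kTwoPointerFiller };

// Sketch: which header a free block of a given size receives.
FreeBlockKind ClassifyFreeBlock(int size_in_bytes) {
  const int kPointerSize = 8;                         // Assumed 64-bit.
  const int kFreeSpaceHeaderSize = 2 * kPointerSize;  // Map word + size word.
  if (size_in_bytes > kFreeSpaceHeaderSize)
    return FreeBlockKind::kFreeSpace;         // Map + explicit size (+ next).
  if (size_in_bytes == kPointerSize)
    return FreeBlockKind::kOnePointerFiller;  // Size implied by the map.
  assert(size_in_bytes == 2 * kPointerSize);  // Anything else is UNREACHABLE().
  return FreeBlockKind::kTwoPointerFiller;
}
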
2050 | 2050 |
2051 | 2051 |
2052 FreeListNode* FreeListNode::next() { | 2052 FreeListNode* FreeListNode::next() { |
2053 ASSERT(IsFreeListNode(this)); | 2053 DCHECK(IsFreeListNode(this)); |
2054 if (map() == GetHeap()->raw_unchecked_free_space_map()) { | 2054 if (map() == GetHeap()->raw_unchecked_free_space_map()) { |
2055 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize); | 2055 DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize); |
2056 return reinterpret_cast<FreeListNode*>( | 2056 return reinterpret_cast<FreeListNode*>( |
2057 Memory::Address_at(address() + kNextOffset)); | 2057 Memory::Address_at(address() + kNextOffset)); |
2058 } else { | 2058 } else { |
2059 return reinterpret_cast<FreeListNode*>( | 2059 return reinterpret_cast<FreeListNode*>( |
2060 Memory::Address_at(address() + kPointerSize)); | 2060 Memory::Address_at(address() + kPointerSize)); |
2061 } | 2061 } |
2062 } | 2062 } |
2063 | 2063 |
2064 | 2064 |
2065 FreeListNode** FreeListNode::next_address() { | 2065 FreeListNode** FreeListNode::next_address() { |
2066 ASSERT(IsFreeListNode(this)); | 2066 DCHECK(IsFreeListNode(this)); |
2067 if (map() == GetHeap()->raw_unchecked_free_space_map()) { | 2067 if (map() == GetHeap()->raw_unchecked_free_space_map()) { |
2068 ASSERT(Size() >= kNextOffset + kPointerSize); | 2068 DCHECK(Size() >= kNextOffset + kPointerSize); |
2069 return reinterpret_cast<FreeListNode**>(address() + kNextOffset); | 2069 return reinterpret_cast<FreeListNode**>(address() + kNextOffset); |
2070 } else { | 2070 } else { |
2071 return reinterpret_cast<FreeListNode**>(address() + kPointerSize); | 2071 return reinterpret_cast<FreeListNode**>(address() + kPointerSize); |
2072 } | 2072 } |
2073 } | 2073 } |
2074 | 2074 |
2075 | 2075 |
2076 void FreeListNode::set_next(FreeListNode* next) { | 2076 void FreeListNode::set_next(FreeListNode* next) { |
2077 ASSERT(IsFreeListNode(this)); | 2077 DCHECK(IsFreeListNode(this)); |
2078 // While we are booting the VM, the free space map will actually be null. So | 2078 // While we are booting the VM, the free space map will actually be null. So |
2079 // we have to make sure that we don't try to use it for anything at that | 2079 // we have to make sure that we don't try to use it for anything at that |
2080 // stage. | 2080 // stage. |
2081 if (map() == GetHeap()->raw_unchecked_free_space_map()) { | 2081 if (map() == GetHeap()->raw_unchecked_free_space_map()) { |
2082 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize); | 2082 DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize); |
2083 base::NoBarrier_Store( | 2083 base::NoBarrier_Store( |
2084 reinterpret_cast<base::AtomicWord*>(address() + kNextOffset), | 2084 reinterpret_cast<base::AtomicWord*>(address() + kNextOffset), |
2085 reinterpret_cast<base::AtomicWord>(next)); | 2085 reinterpret_cast<base::AtomicWord>(next)); |
2086 } else { | 2086 } else { |
2087 base::NoBarrier_Store( | 2087 base::NoBarrier_Store( |
2088 reinterpret_cast<base::AtomicWord*>(address() + kPointerSize), | 2088 reinterpret_cast<base::AtomicWord*>(address() + kPointerSize), |
2089 reinterpret_cast<base::AtomicWord>(next)); | 2089 reinterpret_cast<base::AtomicWord>(next)); |
2090 } | 2090 } |
2091 } | 2091 } |
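
Aside: the layout rule shared by next(), next_address() and set_next(): a block with a real FreeSpace header stores its next pointer past the map and size words, while a two-word filler has no size field, so the pointer sits right after the map word. A sketch under an assumed 64-bit layout:

#include <cstdint>

// Sketch: compute where a free block's next pointer lives.
uintptr_t NextPointerAddress(uintptr_t block_address,
                             bool has_free_space_map) {
  const int kPointerSize = 8;                // Assumed 64-bit layout.
  const int kNextOffset = 2 * kPointerSize;  // Past the map and size words.
  return block_address +
         (has_free_space_map ? kNextOffset : kPointerSize);  // Past the map.
}
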
2092 | 2092 |
2093 | 2093 |
2094 intptr_t FreeListCategory::Concatenate(FreeListCategory* category) { | 2094 intptr_t FreeListCategory::Concatenate(FreeListCategory* category) { |
2095 intptr_t free_bytes = 0; | 2095 intptr_t free_bytes = 0; |
2096 if (category->top() != NULL) { | 2096 if (category->top() != NULL) { |
2097 // This is safe (not going to deadlock) since Concatenate operations | 2097 // This is safe (not going to deadlock) since Concatenate operations |
2098 // are never performed on the same free lists at the same time in | 2098 // are never performed on the same free lists at the same time in |
2099 // reverse order. | 2099 // reverse order. |
2100 base::LockGuard<base::Mutex> target_lock_guard(mutex()); | 2100 base::LockGuard<base::Mutex> target_lock_guard(mutex()); |
2101 base::LockGuard<base::Mutex> source_lock_guard(category->mutex()); | 2101 base::LockGuard<base::Mutex> source_lock_guard(category->mutex()); |
2102 ASSERT(category->end_ != NULL); | 2102 DCHECK(category->end_ != NULL); |
2103 free_bytes = category->available(); | 2103 free_bytes = category->available(); |
2104 if (end_ == NULL) { | 2104 if (end_ == NULL) { |
2105 end_ = category->end(); | 2105 end_ = category->end(); |
2106 } else { | 2106 } else { |
2107 category->end()->set_next(top()); | 2107 category->end()->set_next(top()); |
2108 } | 2108 } |
2109 set_top(category->top()); | 2109 set_top(category->top()); |
2110 base::NoBarrier_Store(&top_, category->top_); | 2110 base::NoBarrier_Store(&top_, category->top_); |
2111 available_ += category->available(); | 2111 available_ += category->available(); |
2112 category->Reset(); | 2112 category->Reset(); |
(...skipping 90 matching lines...) |
2203 } | 2203 } |
2204 | 2204 |
2205 | 2205 |
2206 void FreeListCategory::RepairFreeList(Heap* heap) { | 2206 void FreeListCategory::RepairFreeList(Heap* heap) { |
2207 FreeListNode* n = top(); | 2207 FreeListNode* n = top(); |
2208 while (n != NULL) { | 2208 while (n != NULL) { |
2209 Map** map_location = reinterpret_cast<Map**>(n->address()); | 2209 Map** map_location = reinterpret_cast<Map**>(n->address()); |
2210 if (*map_location == NULL) { | 2210 if (*map_location == NULL) { |
2211 *map_location = heap->free_space_map(); | 2211 *map_location = heap->free_space_map(); |
2212 } else { | 2212 } else { |
2213 ASSERT(*map_location == heap->free_space_map()); | 2213 DCHECK(*map_location == heap->free_space_map()); |
2214 } | 2214 } |
2215 n = n->next(); | 2215 n = n->next(); |
2216 } | 2216 } |
2217 } | 2217 } |
2218 | 2218 |
2219 | 2219 |
2220 FreeList::FreeList(PagedSpace* owner) | 2220 FreeList::FreeList(PagedSpace* owner) |
2221 : owner_(owner), heap_(owner->heap()) { | 2221 : owner_(owner), heap_(owner->heap()) { |
2222 Reset(); | 2222 Reset(); |
2223 } | 2223 } |
(...skipping 39 matching lines...) |
2263 medium_list_.Free(node, size_in_bytes); | 2263 medium_list_.Free(node, size_in_bytes); |
2264 page->add_available_in_medium_free_list(size_in_bytes); | 2264 page->add_available_in_medium_free_list(size_in_bytes); |
2265 } else if (size_in_bytes <= kLargeListMax) { | 2265 } else if (size_in_bytes <= kLargeListMax) { |
2266 large_list_.Free(node, size_in_bytes); | 2266 large_list_.Free(node, size_in_bytes); |
2267 page->add_available_in_large_free_list(size_in_bytes); | 2267 page->add_available_in_large_free_list(size_in_bytes); |
2268 } else { | 2268 } else { |
2269 huge_list_.Free(node, size_in_bytes); | 2269 huge_list_.Free(node, size_in_bytes); |
2270 page->add_available_in_huge_free_list(size_in_bytes); | 2270 page->add_available_in_huge_free_list(size_in_bytes); |
2271 } | 2271 } |
2272 | 2272 |
2273 ASSERT(IsVeryLong() || available() == SumFreeLists()); | 2273 DCHECK(IsVeryLong() || available() == SumFreeLists()); |
2274 return 0; | 2274 return 0; |
2275 } | 2275 } |
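
Aside: the size-class dispatch above as a standalone function. The list boundaries are stand-ins in the spirit of kSmallListMax / kMediumListMax / kLargeListMax (assumed values, not verified against spaces.h); anything beyond the large bound goes to the huge list.

#include <cstdio>

// Sketch: pick a free list by block size (boundaries are assumed values).
const char* FreeListFor(int size_in_bytes) {
  const int kPointerSize = 8;
  const int kSmallListMax = 0xff * kPointerSize;    // 2040 bytes.
  const int kMediumListMax = 0x7ff * kPointerSize;  // 16376 bytes.
  const int kLargeListMax = 0x3fff * kPointerSize;  // 131064 bytes.
  if (size_in_bytes <= kSmallListMax) return "small";
  if (size_in_bytes <= kMediumListMax) return "medium";
  if (size_in_bytes <= kLargeListMax) return "large";
  return "huge";
}

int main() {
  std::printf("%s %s %s\n", FreeListFor(64), FreeListFor(10000),
              FreeListFor(1 << 20));  // small medium huge
  return 0;
}
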
2276 | 2276 |
2277 | 2277 |
2278 FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) { | 2278 FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) { |
2279 FreeListNode* node = NULL; | 2279 FreeListNode* node = NULL; |
2280 Page* page = NULL; | 2280 Page* page = NULL; |
2281 | 2281 |
2282 if (size_in_bytes <= kSmallAllocationMax) { | 2282 if (size_in_bytes <= kSmallAllocationMax) { |
2283 node = small_list_.PickNodeFromList(node_size); | 2283 node = small_list_.PickNodeFromList(node_size); |
2284 if (node != NULL) { | 2284 if (node != NULL) { |
2285 ASSERT(size_in_bytes <= *node_size); | 2285 DCHECK(size_in_bytes <= *node_size); |
2286 page = Page::FromAddress(node->address()); | 2286 page = Page::FromAddress(node->address()); |
2287 page->add_available_in_small_free_list(-(*node_size)); | 2287 page->add_available_in_small_free_list(-(*node_size)); |
2288 ASSERT(IsVeryLong() || available() == SumFreeLists()); | 2288 DCHECK(IsVeryLong() || available() == SumFreeLists()); |
2289 return node; | 2289 return node; |
2290 } | 2290 } |
2291 } | 2291 } |
2292 | 2292 |
2293 if (size_in_bytes <= kMediumAllocationMax) { | 2293 if (size_in_bytes <= kMediumAllocationMax) { |
2294 node = medium_list_.PickNodeFromList(node_size); | 2294 node = medium_list_.PickNodeFromList(node_size); |
2295 if (node != NULL) { | 2295 if (node != NULL) { |
2296 ASSERT(size_in_bytes <= *node_size); | 2296 DCHECK(size_in_bytes <= *node_size); |
2297 page = Page::FromAddress(node->address()); | 2297 page = Page::FromAddress(node->address()); |
2298 page->add_available_in_medium_free_list(-(*node_size)); | 2298 page->add_available_in_medium_free_list(-(*node_size)); |
2299 ASSERT(IsVeryLong() || available() == SumFreeLists()); | 2299 DCHECK(IsVeryLong() || available() == SumFreeLists()); |
2300 return node; | 2300 return node; |
2301 } | 2301 } |
2302 } | 2302 } |
2303 | 2303 |
2304 if (size_in_bytes <= kLargeAllocationMax) { | 2304 if (size_in_bytes <= kLargeAllocationMax) { |
2305 node = large_list_.PickNodeFromList(node_size); | 2305 node = large_list_.PickNodeFromList(node_size); |
2306 if (node != NULL) { | 2306 if (node != NULL) { |
2307 ASSERT(size_in_bytes <= *node_size); | 2307 DCHECK(size_in_bytes <= *node_size); |
2308 page = Page::FromAddress(node->address()); | 2308 page = Page::FromAddress(node->address()); |
2309 page->add_available_in_large_free_list(-(*node_size)); | 2309 page->add_available_in_large_free_list(-(*node_size)); |
2310 ASSERT(IsVeryLong() || available() == SumFreeLists()); | 2310 DCHECK(IsVeryLong() || available() == SumFreeLists()); |
2311 return node; | 2311 return node; |
2312 } | 2312 } |
2313 } | 2313 } |
2314 | 2314 |
2315 int huge_list_available = huge_list_.available(); | 2315 int huge_list_available = huge_list_.available(); |
2316 FreeListNode* top_node = huge_list_.top(); | 2316 FreeListNode* top_node = huge_list_.top(); |
2317 for (FreeListNode** cur = &top_node; | 2317 for (FreeListNode** cur = &top_node; |
2318 *cur != NULL; | 2318 *cur != NULL; |
2319 cur = (*cur)->next_address()) { | 2319 cur = (*cur)->next_address()) { |
2320 FreeListNode* cur_node = *cur; | 2320 FreeListNode* cur_node = *cur; |
2321 while (cur_node != NULL && | 2321 while (cur_node != NULL && |
2322 Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) { | 2322 Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) { |
2323 int size = reinterpret_cast<FreeSpace*>(cur_node)->Size(); | 2323 int size = reinterpret_cast<FreeSpace*>(cur_node)->Size(); |
2324 huge_list_available -= size; | 2324 huge_list_available -= size; |
2325 page = Page::FromAddress(cur_node->address()); | 2325 page = Page::FromAddress(cur_node->address()); |
2326 page->add_available_in_huge_free_list(-size); | 2326 page->add_available_in_huge_free_list(-size); |
2327 cur_node = cur_node->next(); | 2327 cur_node = cur_node->next(); |
2328 } | 2328 } |
2329 | 2329 |
2330 *cur = cur_node; | 2330 *cur = cur_node; |
2331 if (cur_node == NULL) { | 2331 if (cur_node == NULL) { |
2332 huge_list_.set_end(NULL); | 2332 huge_list_.set_end(NULL); |
2333 break; | 2333 break; |
2334 } | 2334 } |
2335 | 2335 |
2336 ASSERT((*cur)->map() == heap_->raw_unchecked_free_space_map()); | 2336 DCHECK((*cur)->map() == heap_->raw_unchecked_free_space_map()); |
2337 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur); | 2337 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur); |
2338 int size = cur_as_free_space->Size(); | 2338 int size = cur_as_free_space->Size(); |
2339 if (size >= size_in_bytes) { | 2339 if (size >= size_in_bytes) { |
2340 // Large enough node found. Unlink it from the list. | 2340 // Large enough node found. Unlink it from the list. |
2341 node = *cur; | 2341 node = *cur; |
2342 *cur = node->next(); | 2342 *cur = node->next(); |
2343 *node_size = size; | 2343 *node_size = size; |
2344 huge_list_available -= size; | 2344 huge_list_available -= size; |
2345 page = Page::FromAddress(node->address()); | 2345 page = Page::FromAddress(node->address()); |
2346 page->add_available_in_huge_free_list(-size); | 2346 page->add_available_in_huge_free_list(-size); |
2347 break; | 2347 break; |
2348 } | 2348 } |
2349 } | 2349 } |
2350 | 2350 |
2351 huge_list_.set_top(top_node); | 2351 huge_list_.set_top(top_node); |
2352 if (huge_list_.top() == NULL) { | 2352 if (huge_list_.top() == NULL) { |
2353 huge_list_.set_end(NULL); | 2353 huge_list_.set_end(NULL); |
2354 } | 2354 } |
2355 huge_list_.set_available(huge_list_available); | 2355 huge_list_.set_available(huge_list_available); |
2356 | 2356 |
2357 if (node != NULL) { | 2357 if (node != NULL) { |
2358 ASSERT(IsVeryLong() || available() == SumFreeLists()); | 2358 DCHECK(IsVeryLong() || available() == SumFreeLists()); |
2359 return node; | 2359 return node; |
2360 } | 2360 } |
2361 | 2361 |
2362 if (size_in_bytes <= kSmallListMax) { | 2362 if (size_in_bytes <= kSmallListMax) { |
2363 node = small_list_.PickNodeFromList(size_in_bytes, node_size); | 2363 node = small_list_.PickNodeFromList(size_in_bytes, node_size); |
2364 if (node != NULL) { | 2364 if (node != NULL) { |
2365 ASSERT(size_in_bytes <= *node_size); | 2365 DCHECK(size_in_bytes <= *node_size); |
2366 page = Page::FromAddress(node->address()); | 2366 page = Page::FromAddress(node->address()); |
2367 page->add_available_in_small_free_list(-(*node_size)); | 2367 page->add_available_in_small_free_list(-(*node_size)); |
2368 } | 2368 } |
2369 } else if (size_in_bytes <= kMediumListMax) { | 2369 } else if (size_in_bytes <= kMediumListMax) { |
2370 node = medium_list_.PickNodeFromList(size_in_bytes, node_size); | 2370 node = medium_list_.PickNodeFromList(size_in_bytes, node_size); |
2371 if (node != NULL) { | 2371 if (node != NULL) { |
2372 ASSERT(size_in_bytes <= *node_size); | 2372 DCHECK(size_in_bytes <= *node_size); |
2373 page = Page::FromAddress(node->address()); | 2373 page = Page::FromAddress(node->address()); |
2374 page->add_available_in_medium_free_list(-(*node_size)); | 2374 page->add_available_in_medium_free_list(-(*node_size)); |
2375 } | 2375 } |
2376 } else if (size_in_bytes <= kLargeListMax) { | 2376 } else if (size_in_bytes <= kLargeListMax) { |
2377 node = large_list_.PickNodeFromList(size_in_bytes, node_size); | 2377 node = large_list_.PickNodeFromList(size_in_bytes, node_size); |
2378 if (node != NULL) { | 2378 if (node != NULL) { |
2379 ASSERT(size_in_bytes <= *node_size); | 2379 DCHECK(size_in_bytes <= *node_size); |
2380 page = Page::FromAddress(node->address()); | 2380 page = Page::FromAddress(node->address()); |
2381 page->add_available_in_large_free_list(-(*node_size)); | 2381 page->add_available_in_large_free_list(-(*node_size)); |
2382 } | 2382 } |
2383 } | 2383 } |
2384 | 2384 |
2385 ASSERT(IsVeryLong() || available() == SumFreeLists()); | 2385 DCHECK(IsVeryLong() || available() == SumFreeLists()); |
2386 return node; | 2386 return node; |
2387 } | 2387 } |
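
Aside: the huge-list scan above, reduced to its core: walk the list, unlink nodes that sit on evacuation candidates, and take the first node that is large enough (first-fit, not best-fit). A sketch with plain structs; the per-page available-byte bookkeeping is omitted.

#include <cstddef>

struct Node { int size; bool on_evacuation_candidate; Node* next; };

// Sketch: first-fit search that also prunes nodes on evacuation candidates.
Node* TakeFirstFit(Node** head, int size_in_bytes) {
  for (Node** cur = head; *cur != nullptr; cur = &(*cur)->next) {
    while (*cur != nullptr && (*cur)->on_evacuation_candidate) {
      *cur = (*cur)->next;                  // Unlink unusable nodes in place.
    }
    if (*cur == nullptr) break;             // Pruned to the end of the list.
    if ((*cur)->size >= size_in_bytes) {    // Large enough: unlink and return.
      Node* node = *cur;
      *cur = node->next;
      return node;
    }
  }
  return nullptr;
}
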
2388 | 2388 |
2389 | 2389 |
2390 // Allocation on the old space free list. If it succeeds then a new linear | 2390 // Allocation on the old space free list. If it succeeds then a new linear |
2391 // allocation space has been set up with the top and limit of the space. If | 2391 // allocation space has been set up with the top and limit of the space. If |
2392 // the allocation fails then NULL is returned, and the caller can perform a GC | 2392 // the allocation fails then NULL is returned, and the caller can perform a GC |
2393 // or allocate a new page before retrying. | 2393 // or allocate a new page before retrying. |
2394 HeapObject* FreeList::Allocate(int size_in_bytes) { | 2394 HeapObject* FreeList::Allocate(int size_in_bytes) { |
2395 ASSERT(0 < size_in_bytes); | 2395 DCHECK(0 < size_in_bytes); |
2396 ASSERT(size_in_bytes <= kMaxBlockSize); | 2396 DCHECK(size_in_bytes <= kMaxBlockSize); |
2397 ASSERT(IsAligned(size_in_bytes, kPointerSize)); | 2397 DCHECK(IsAligned(size_in_bytes, kPointerSize)); |
2398 // Don't free list allocate if there is linear space available. | 2398 // Don't free list allocate if there is linear space available. |
2399 ASSERT(owner_->limit() - owner_->top() < size_in_bytes); | 2399 DCHECK(owner_->limit() - owner_->top() < size_in_bytes); |
2400 | 2400 |
2401 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top()); | 2401 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top()); |
2402 // Mark the old linear allocation area with a free space map so it can be | 2402 // Mark the old linear allocation area with a free space map so it can be |
2403 // skipped when scanning the heap. This also puts it back in the free list | 2403 // skipped when scanning the heap. This also puts it back in the free list |
2404 // if it is big enough. | 2404 // if it is big enough. |
2405 owner_->Free(owner_->top(), old_linear_size); | 2405 owner_->Free(owner_->top(), old_linear_size); |
2406 | 2406 |
2407 owner_->heap()->incremental_marking()->OldSpaceStep( | 2407 owner_->heap()->incremental_marking()->OldSpaceStep( |
2408 size_in_bytes - old_linear_size); | 2408 size_in_bytes - old_linear_size); |
2409 | 2409 |
2410 int new_node_size = 0; | 2410 int new_node_size = 0; |
2411 FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size); | 2411 FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size); |
2412 if (new_node == NULL) { | 2412 if (new_node == NULL) { |
2413 owner_->SetTopAndLimit(NULL, NULL); | 2413 owner_->SetTopAndLimit(NULL, NULL); |
2414 return NULL; | 2414 return NULL; |
2415 } | 2415 } |
2416 | 2416 |
2417 int bytes_left = new_node_size - size_in_bytes; | 2417 int bytes_left = new_node_size - size_in_bytes; |
2418 ASSERT(bytes_left >= 0); | 2418 DCHECK(bytes_left >= 0); |
2419 | 2419 |
2420 #ifdef DEBUG | 2420 #ifdef DEBUG |
2421 for (int i = 0; i < size_in_bytes / kPointerSize; i++) { | 2421 for (int i = 0; i < size_in_bytes / kPointerSize; i++) { |
2422 reinterpret_cast<Object**>(new_node->address())[i] = | 2422 reinterpret_cast<Object**>(new_node->address())[i] = |
2423 Smi::FromInt(kCodeZapValue); | 2423 Smi::FromInt(kCodeZapValue); |
2424 } | 2424 } |
2425 #endif | 2425 #endif |
2426 | 2426 |
2427 // The old-space-step might have finished sweeping and restarted marking. | 2427 // The old-space-step might have finished sweeping and restarted marking. |
2428 // Verify that it did not turn the page of the new node into an evacuation | 2428 // Verify that it did not turn the page of the new node into an evacuation |
2429 // candidate. | 2429 // candidate. |
2430 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node)); | 2430 DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node)); |
2431 | 2431 |
2432 const int kThreshold = IncrementalMarking::kAllocatedThreshold; | 2432 const int kThreshold = IncrementalMarking::kAllocatedThreshold; |
2433 | 2433 |
2434 // Memory in the linear allocation area is counted as allocated. We may free | 2434 // Memory in the linear allocation area is counted as allocated. We may free |
2435 // a little of this again immediately - see below. | 2435 // a little of this again immediately - see below. |
2436 owner_->Allocate(new_node_size); | 2436 owner_->Allocate(new_node_size); |
2437 | 2437 |
2438 if (owner_->heap()->inline_allocation_disabled()) { | 2438 if (owner_->heap()->inline_allocation_disabled()) { |
2439 // Keep the linear allocation area empty if requested to do so, just | 2439 // Keep the linear allocation area empty if requested to do so, just |
2440 // return the area to the free list instead. | 2440 // return the area to the free list instead. |
2441 owner_->Free(new_node->address() + size_in_bytes, bytes_left); | 2441 owner_->Free(new_node->address() + size_in_bytes, bytes_left); |
2442 ASSERT(owner_->top() == NULL && owner_->limit() == NULL); | 2442 DCHECK(owner_->top() == NULL && owner_->limit() == NULL); |
2443 } else if (bytes_left > kThreshold && | 2443 } else if (bytes_left > kThreshold && |
2444 owner_->heap()->incremental_marking()->IsMarkingIncomplete() && | 2444 owner_->heap()->incremental_marking()->IsMarkingIncomplete() && |
2445 FLAG_incremental_marking_steps) { | 2445 FLAG_incremental_marking_steps) { |
2446 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold); | 2446 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold); |
2447 // We don't want to give the allocator too large a linear area while | 2447 // We don't want to give the allocator too large a linear area while |
2448 // incremental marking is going on, because we won't check again whether | 2448 // incremental marking is going on, because we won't check again whether |
2449 // we want to do another increment until the linear area is used up. | 2449 // we want to do another increment until the linear area is used up. |
2450 owner_->Free(new_node->address() + size_in_bytes + linear_size, | 2450 owner_->Free(new_node->address() + size_in_bytes + linear_size, |
2451 new_node_size - size_in_bytes - linear_size); | 2451 new_node_size - size_in_bytes - linear_size); |
2452 owner_->SetTopAndLimit(new_node->address() + size_in_bytes, | 2452 owner_->SetTopAndLimit(new_node->address() + size_in_bytes, |
(...skipping 44 matching lines...) |
2497 large_list_.RepairFreeList(heap); | 2497 large_list_.RepairFreeList(heap); |
2498 huge_list_.RepairFreeList(heap); | 2498 huge_list_.RepairFreeList(heap); |
2499 } | 2499 } |
2500 | 2500 |
2501 | 2501 |
2502 #ifdef DEBUG | 2502 #ifdef DEBUG |
2503 intptr_t FreeListCategory::SumFreeList() { | 2503 intptr_t FreeListCategory::SumFreeList() { |
2504 intptr_t sum = 0; | 2504 intptr_t sum = 0; |
2505 FreeListNode* cur = top(); | 2505 FreeListNode* cur = top(); |
2506 while (cur != NULL) { | 2506 while (cur != NULL) { |
2507 ASSERT(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map()); | 2507 DCHECK(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map()); |
2508 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur); | 2508 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur); |
2509 sum += cur_as_free_space->nobarrier_size(); | 2509 sum += cur_as_free_space->nobarrier_size(); |
2510 cur = cur->next(); | 2510 cur = cur->next(); |
2511 } | 2511 } |
2512 return sum; | 2512 return sum; |
2513 } | 2513 } |
2514 | 2514 |
2515 | 2515 |
2516 static const int kVeryLongFreeList = 500; | 2516 static const int kVeryLongFreeList = 500; |
2517 | 2517 |
(...skipping 43 matching lines...) |
2561 // This counter will be increased for pages which will be swept by the | 2561 // This counter will be increased for pages which will be swept by the |
2562 // sweeper threads. | 2562 // sweeper threads. |
2563 unswept_free_bytes_ = 0; | 2563 unswept_free_bytes_ = 0; |
2564 | 2564 |
2565 // Clear the free list before a full GC---it will be rebuilt afterward. | 2565 // Clear the free list before a full GC---it will be rebuilt afterward. |
2566 free_list_.Reset(); | 2566 free_list_.Reset(); |
2567 } | 2567 } |
2568 | 2568 |
2569 | 2569 |
2570 intptr_t PagedSpace::SizeOfObjects() { | 2570 intptr_t PagedSpace::SizeOfObjects() { |
2571 ASSERT(heap()->mark_compact_collector()->sweeping_in_progress() || | 2571 DCHECK(heap()->mark_compact_collector()->sweeping_in_progress() || |
2572 (unswept_free_bytes_ == 0)); | 2572 (unswept_free_bytes_ == 0)); |
2573 return Size() - unswept_free_bytes_ - (limit() - top()); | 2573 return Size() - unswept_free_bytes_ - (limit() - top()); |
2574 } | 2574 } |
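
Aside: the accounting identity in SizeOfObjects() with made-up numbers; both the unswept free bytes and the unused tail of the linear allocation area are included in Size() yet hold no live objects.

#include <cassert>
#include <cstdint>

int main() {
  intptr_t size = 8 * 1024 * 1024;             // Space size: 8 MB.
  intptr_t unswept_free_bytes = 512 * 1024;    // Known free, not yet swept.
  intptr_t linear_area_remaining = 64 * 1024;  // limit() - top().
  intptr_t size_of_objects =
      size - unswept_free_bytes - linear_area_remaining;
  assert(size_of_objects == 8 * 1024 * 1024 - 576 * 1024);
  return 0;
}
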
2575 | 2575 |
2576 | 2576 |
2577 // After we have booted, we have created a map which represents free space | 2577 // After we have booted, we have created a map which represents free space |
2578 // on the heap. If there was already a free list then the elements on it | 2578 // on the heap. If there was already a free list then the elements on it |
2579 // were created with the wrong FreeSpaceMap (normally NULL), so we need to | 2579 // were created with the wrong FreeSpaceMap (normally NULL), so we need to |
2580 // fix them. | 2580 // fix them. |
2581 void PagedSpace::RepairFreeListsAfterBoot() { | 2581 void PagedSpace::RepairFreeListsAfterBoot() { |
(...skipping 47 matching lines...) |
2629 if (object != NULL) return object; | 2629 if (object != NULL) return object; |
2630 | 2630 |
2631 // If sweeping is still in progress, try to sweep pages on the main thread. | 2631 // If sweeping is still in progress, try to sweep pages on the main thread. |
2632 int free_chunk = | 2632 int free_chunk = |
2633 collector->SweepInParallel(this, size_in_bytes); | 2633 collector->SweepInParallel(this, size_in_bytes); |
2634 collector->RefillFreeList(this); | 2634 collector->RefillFreeList(this); |
2635 if (free_chunk >= size_in_bytes) { | 2635 if (free_chunk >= size_in_bytes) { |
2636 HeapObject* object = free_list_.Allocate(size_in_bytes); | 2636 HeapObject* object = free_list_.Allocate(size_in_bytes); |
2637 // We should be able to allocate an object here since we just freed that | 2637 // We should be able to allocate an object here since we just freed that |
2638 // much memory. | 2638 // much memory. |
2639 ASSERT(object != NULL); | 2639 DCHECK(object != NULL); |
2640 if (object != NULL) return object; | 2640 if (object != NULL) return object; |
2641 } | 2641 } |
2642 } | 2642 } |
2643 | 2643 |
2644 // Free list allocation failed and there is no next page. Fail if we have | 2644 // Free list allocation failed and there is no next page. Fail if we have |
2645 // hit the old generation size limit that should cause a garbage | 2645 // hit the old generation size limit that should cause a garbage |
2646 // collection. | 2646 // collection. |
2647 if (!heap()->always_allocate() | 2647 if (!heap()->always_allocate() |
2648 && heap()->OldGenerationAllocationLimitReached()) { | 2648 && heap()->OldGenerationAllocationLimitReached()) { |
2649 // If sweeper threads are active, wait for them at that point and steal | 2649 // If sweeper threads are active, wait for them at that point and steal |
2650 // elements from their free-lists. | 2650 // elements from their free-lists. |
2651 HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes); | 2651 HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes); |
2652 if (object != NULL) return object; | 2652 if (object != NULL) return object; |
2653 } | 2653 } |
2654 | 2654 |
2655 // Try to expand the space and allocate in the new next page. | 2655 // Try to expand the space and allocate in the new next page. |
2656 if (Expand()) { | 2656 if (Expand()) { |
2657 ASSERT(CountTotalPages() > 1 || size_in_bytes <= free_list_.available()); | 2657 DCHECK(CountTotalPages() > 1 || size_in_bytes <= free_list_.available()); |
2658 return free_list_.Allocate(size_in_bytes); | 2658 return free_list_.Allocate(size_in_bytes); |
2659 } | 2659 } |
2660 | 2660 |
2661 // If sweeper threads are active, wait for them at that point and steal | 2661 // If sweeper threads are active, wait for them at that point and steal |
2662 // elements from their free-lists. Allocation may still fail there, which | 2662 // elements from their free-lists. Allocation may still fail there, which |
2663 // would indicate that there is not enough memory for the given allocation. | 2663 // would indicate that there is not enough memory for the given allocation. |
2664 return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes); | 2664 return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes); |
2665 } | 2665 } |
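
Aside: the retry ladder of the slow allocation path as a standalone control-flow model; the callbacks are stand-ins for the sweeper, free-list, and expansion operations visible above, not V8 signatures.

#include <functional>

using Alloc = std::function<void*(int)>;

// Sketch: the order of fallbacks once free-list allocation has failed.
void* SlowAllocate(int size, bool sweeping_in_progress, bool gc_limit_reached,
                   Alloc sweep_in_parallel_then_alloc,
                   Alloc wait_for_sweepers_then_alloc,
                   std::function<bool()> expand, Alloc free_list_alloc) {
  if (sweeping_in_progress) {  // Help sweep on the main thread first.
    if (void* o = sweep_in_parallel_then_alloc(size)) return o;
  }
  if (gc_limit_reached) {      // Prefer triggering a GC over growing.
    if (void* o = wait_for_sweepers_then_alloc(size)) return o;
  }
  if (expand()) {              // Add a page, then allocate from it.
    return free_list_alloc(size);
  }
  return wait_for_sweepers_then_alloc(size);  // Last resort; may be NULL.
}
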
2666 | 2666 |
2667 | 2667 |
(...skipping 50 matching lines...) |
2718 } | 2718 } |
2719 // Update entry for 'comment' | 2719 // Update entry for 'comment' |
2720 cs->size += delta; | 2720 cs->size += delta; |
2721 cs->count += 1; | 2721 cs->count += 1; |
2722 } | 2722 } |
2723 | 2723 |
2724 | 2724 |
2725 // Call for each nested comment start (start marked with '[ xxx', end marked | 2725 // Call for each nested comment start (start marked with '[ xxx', end marked |
2726 // with ']'). RelocIterator 'it' must point to a comment reloc info. | 2726 // with ']'). RelocIterator 'it' must point to a comment reloc info. |
2727 static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) { | 2727 static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) { |
2728 ASSERT(!it->done()); | 2728 DCHECK(!it->done()); |
2729 ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT); | 2729 DCHECK(it->rinfo()->rmode() == RelocInfo::COMMENT); |
2730 const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data()); | 2730 const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data()); |
2731 if (tmp[0] != '[') { | 2731 if (tmp[0] != '[') { |
2732 // Not a nested comment; skip | 2732 // Not a nested comment; skip |
2733 return; | 2733 return; |
2734 } | 2734 } |
2735 | 2735 |
2736 // Search for end of nested comment or a new nested comment | 2736 // Search for end of nested comment or a new nested comment |
2737 const char* const comment_txt = | 2737 const char* const comment_txt = |
2738 reinterpret_cast<const char*>(it->rinfo()->data()); | 2738 reinterpret_cast<const char*>(it->rinfo()->data()); |
2739 const byte* prev_pc = it->rinfo()->pc(); | 2739 const byte* prev_pc = it->rinfo()->pc(); |
2740 int flat_delta = 0; | 2740 int flat_delta = 0; |
2741 it->next(); | 2741 it->next(); |
2742 while (true) { | 2742 while (true) { |
2743 // All nested comments must be terminated properly, and therefore we exit | 2743 // All nested comments must be terminated properly, and therefore we exit |
2744 // from the loop. | 2744 // from the loop. |
2745 ASSERT(!it->done()); | 2745 DCHECK(!it->done()); |
2746 if (it->rinfo()->rmode() == RelocInfo::COMMENT) { | 2746 if (it->rinfo()->rmode() == RelocInfo::COMMENT) { |
2747 const char* const txt = | 2747 const char* const txt = |
2748 reinterpret_cast<const char*>(it->rinfo()->data()); | 2748 reinterpret_cast<const char*>(it->rinfo()->data()); |
2749 flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc); | 2749 flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc); |
2750 if (txt[0] == ']') break; // End of nested comment | 2750 if (txt[0] == ']') break; // End of nested comment |
2751 // A new comment | 2751 // A new comment |
2752 CollectCommentStatistics(isolate, it); | 2752 CollectCommentStatistics(isolate, it); |
2753 // Skip code that was covered by the previous comment | 2753 // Skip code that was covered by the previous comment |
2754 prev_pc = it->rinfo()->pc(); | 2754 prev_pc = it->rinfo()->pc(); |
2755 } | 2755 } |
(...skipping 18 matching lines...) |
2774 const byte* prev_pc = code->instruction_start(); | 2774 const byte* prev_pc = code->instruction_start(); |
2775 while (!it.done()) { | 2775 while (!it.done()) { |
2776 if (it.rinfo()->rmode() == RelocInfo::COMMENT) { | 2776 if (it.rinfo()->rmode() == RelocInfo::COMMENT) { |
2777 delta += static_cast<int>(it.rinfo()->pc() - prev_pc); | 2777 delta += static_cast<int>(it.rinfo()->pc() - prev_pc); |
2778 CollectCommentStatistics(isolate, &it); | 2778 CollectCommentStatistics(isolate, &it); |
2779 prev_pc = it.rinfo()->pc(); | 2779 prev_pc = it.rinfo()->pc(); |
2780 } | 2780 } |
2781 it.next(); | 2781 it.next(); |
2782 } | 2782 } |
2783 | 2783 |
2784 ASSERT(code->instruction_start() <= prev_pc && | 2784 DCHECK(code->instruction_start() <= prev_pc && |
2785 prev_pc <= code->instruction_end()); | 2785 prev_pc <= code->instruction_end()); |
2786 delta += static_cast<int>(code->instruction_end() - prev_pc); | 2786 delta += static_cast<int>(code->instruction_end() - prev_pc); |
2787 EnterComment(isolate, "NoComment", delta); | 2787 EnterComment(isolate, "NoComment", delta); |
2788 } | 2788 } |
2789 } | 2789 } |
2790 } | 2790 } |
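
Aside: the nesting convention the walker above relies on: '[ name' opens a region, ']' closes the innermost one, and code bytes between reloc entries are charged to whichever region is open (or to "NoComment"). A reduced model assuming well-formed nesting:

#include <cstdio>
#include <string>
#include <vector>

struct Entry { int pc; std::string txt; };  // A COMMENT reloc: position, text.

// Sketch: attribute code bytes to the innermost open comment region.
void Attribute(const std::vector<Entry>& entries, int code_end) {
  std::vector<std::string> open = {"NoComment"};
  int prev_pc = 0;
  for (const Entry& e : entries) {
    std::printf("%-10s %d bytes\n", open.back().c_str(), e.pc - prev_pc);
    prev_pc = e.pc;
    if (e.txt == "]") open.pop_back();                // Close innermost.
    else if (e.txt[0] == '[') open.push_back(e.txt);  // Open a new region.
  }
  std::printf("%-10s %d bytes\n", open.back().c_str(), code_end - prev_pc);
}

int main() {
  Attribute({{4, "[ LoadIC"}, {10, "[ Inlined"}, {14, "]"}, {20, "]"}}, 28);
  return 0;  // NoComment 4, [ LoadIC 6, [ Inlined 4, [ LoadIC 6, NoComment 8.
}
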
2791 | 2791 |
2792 | 2792 |
2793 void PagedSpace::ReportStatistics() { | 2793 void PagedSpace::ReportStatistics() { |
2794 int pct = static_cast<int>(Available() * 100 / Capacity()); | 2794 int pct = static_cast<int>(Available() * 100 / Capacity()); |
(...skipping 118 matching lines...) |
2913 return AllocationResult::Retry(identity()); | 2913 return AllocationResult::Retry(identity()); |
2914 } | 2914 } |
2915 | 2915 |
2916 if (Size() + object_size > max_capacity_) { | 2916 if (Size() + object_size > max_capacity_) { |
2917 return AllocationResult::Retry(identity()); | 2917 return AllocationResult::Retry(identity()); |
2918 } | 2918 } |
2919 | 2919 |
2920 LargePage* page = heap()->isolate()->memory_allocator()-> | 2920 LargePage* page = heap()->isolate()->memory_allocator()-> |
2921 AllocateLargePage(object_size, this, executable); | 2921 AllocateLargePage(object_size, this, executable); |
2922 if (page == NULL) return AllocationResult::Retry(identity()); | 2922 if (page == NULL) return AllocationResult::Retry(identity()); |
2923 ASSERT(page->area_size() >= object_size); | 2923 DCHECK(page->area_size() >= object_size); |
2924 | 2924 |
2925 size_ += static_cast<int>(page->size()); | 2925 size_ += static_cast<int>(page->size()); |
2926 objects_size_ += object_size; | 2926 objects_size_ += object_size; |
2927 page_count_++; | 2927 page_count_++; |
2928 page->set_next_page(first_page_); | 2928 page->set_next_page(first_page_); |
2929 first_page_ = page; | 2929 first_page_ = page; |
2930 | 2930 |
2931 if (size_ > maximum_committed_) { | 2931 if (size_ > maximum_committed_) { |
2932 maximum_committed_ = size_; | 2932 maximum_committed_ = size_; |
2933 } | 2933 } |
2934 | 2934 |
2935 // Register all MemoryChunk::kAlignment-aligned chunks covered by | 2935 // Register all MemoryChunk::kAlignment-aligned chunks covered by |
2936 // this large page in the chunk map. | 2936 // this large page in the chunk map. |
2937 uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment; | 2937 uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment; |
2938 uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment; | 2938 uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment; |
2939 for (uintptr_t key = base; key <= limit; key++) { | 2939 for (uintptr_t key = base; key <= limit; key++) { |
2940 HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key), | 2940 HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key), |
2941 static_cast<uint32_t>(key), | 2941 static_cast<uint32_t>(key), |
2942 true); | 2942 true); |
2943 ASSERT(entry != NULL); | 2943 DCHECK(entry != NULL); |
2944 entry->value = page; | 2944 entry->value = page; |
2945 } | 2945 } |
2946 | 2946 |
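[Editor's note] The loop above registers one chunk-map entry per kAlignment-sized slot the page overlaps, so FindPage can later recover the page from any interior address. A runnable sketch of the covering-key computation, with a hypothetical 1 MB alignment (the real constant lives in MemoryChunk):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t kAlignment = uintptr_t{1} << 20;  // assumption: 1 MB
      uintptr_t page_start = 5 * kAlignment;            // page base address
      size_t page_size = 3 * kAlignment;                // spans three slots
      uintptr_t base = page_start / kAlignment;
      uintptr_t limit = base + (page_size - 1) / kAlignment;
      for (uintptr_t key = base; key <= limit; key++) {
        std::printf("register key %ju\n", static_cast<uintmax_t>(key));
      }
      return 0;  // prints keys 5, 6 and 7
    }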
2947 HeapObject* object = page->GetObject(); | 2947 HeapObject* object = page->GetObject(); |
2948 | 2948 |
2949 if (Heap::ShouldZapGarbage()) { | 2949 if (Heap::ShouldZapGarbage()) { |
2950 // Make the object consistent so the heap can be verified in OldSpaceStep. | 2950 // Make the object consistent so the heap can be verified in OldSpaceStep. |
2951 // We only need to do this in debug builds or if verify_heap is on. | 2951 // We only need to do this in debug builds or if verify_heap is on. |
2952 reinterpret_cast<Object**>(object->address())[0] = | 2952 reinterpret_cast<Object**>(object->address())[0] = |
2953 heap()->fixed_array_map(); | 2953 heap()->fixed_array_map(); |
(...skipping 26 matching lines...)
2980 return Smi::FromInt(0); // Signals that the object was not found. | 2980 return Smi::FromInt(0); // Signals that the object was not found. |
2981 } | 2981 } |
2982 | 2982 |
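[Editor's note] Smi::FromInt(0) is a safe "not found" sentinel here because, under V8's pointer tagging, a small integer can never alias a HeapObject pointer, so callers can simply test result->IsSmi(). A sketch of the underlying idea (32-bit-style tagging; treat the exact encoding as illustrative):

    #include <cstdint>

    // Heap object pointers carry a low tag bit of 1; smis keep it 0,
    // so a tagged smi is distinguishable from any real object pointer.
    static bool LooksLikeSmi(uintptr_t tagged_value) {
      return (tagged_value & 1) == 0;
    }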
2983 | 2983 |
2984 LargePage* LargeObjectSpace::FindPage(Address a) { | 2984 LargePage* LargeObjectSpace::FindPage(Address a) { |
2985 uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment; | 2985 uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment; |
2986 HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key), | 2986 HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key), |
2987 static_cast<uint32_t>(key), | 2987 static_cast<uint32_t>(key), |
2988 false); | 2988 false); |
2989 if (e != NULL) { | 2989 if (e != NULL) { |
2990 ASSERT(e->value != NULL); | 2990 DCHECK(e->value != NULL); |
2991 LargePage* page = reinterpret_cast<LargePage*>(e->value); | 2991 LargePage* page = reinterpret_cast<LargePage*>(e->value); |
2992 ASSERT(page->is_valid()); | 2992 DCHECK(page->is_valid()); |
2993 if (page->Contains(a)) { | 2993 if (page->Contains(a)) { |
2994 return page; | 2994 return page; |
2995 } | 2995 } |
2996 } | 2996 } |
2997 return NULL; | 2997 return NULL; |
2998 } | 2998 } |
2999 | 2999 |
3000 | 3000 |
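[Editor's note] FindPage mirrors the registration loop in AllocateRaw: divide the raw address by kAlignment, probe the map without inserting, and confirm with Contains() before trusting the entry. A sketch of the same lookup using std::unordered_map in place of V8's HashMap (types are hypothetical):

    #include <cstddef>
    #include <cstdint>
    #include <unordered_map>

    struct FakePage {
      uintptr_t start;
      size_t size;
      bool Contains(uintptr_t a) const {
        return a >= start && a < start + size;
      }
    };

    static FakePage* FindPage(
        const std::unordered_map<uintptr_t, FakePage*>& chunk_map,
        uintptr_t addr, uintptr_t alignment) {
      auto it = chunk_map.find(addr / alignment);
      if (it != chunk_map.end() && it->second->Contains(addr)) {
        return it->second;  // address really lies inside this large page
      }
      return nullptr;       // mirrors the NULL return above
    }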
3001 void LargeObjectSpace::FreeUnmarkedObjects() { | 3001 void LargeObjectSpace::FreeUnmarkedObjects() { |
3002 LargePage* previous = NULL; | 3002 LargePage* previous = NULL; |
(...skipping 48 matching lines...)
3051 heap()->FreeQueuedChunks(); | 3051 heap()->FreeQueuedChunks(); |
3052 } | 3052 } |
3053 | 3053 |
3054 | 3054 |
3055 bool LargeObjectSpace::Contains(HeapObject* object) { | 3055 bool LargeObjectSpace::Contains(HeapObject* object) { |
3056 Address address = object->address(); | 3056 Address address = object->address(); |
3057 MemoryChunk* chunk = MemoryChunk::FromAddress(address); | 3057 MemoryChunk* chunk = MemoryChunk::FromAddress(address); |
3058 | 3058 |
3059 bool owned = (chunk->owner() == this); | 3059 bool owned = (chunk->owner() == this); |
3060 | 3060 |
3061 SLOW_ASSERT(!owned || FindObject(address)->IsHeapObject()); | 3061 SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject()); |
3062 | 3062 |
3063 return owned; | 3063 return owned; |
3064 } | 3064 } |
3065 | 3065 |
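[Editor's note] Contains() leans on MemoryChunk::FromAddress, which recovers the owning chunk header from any interior address by masking off the low bits. A one-function sketch, assuming chunks are kAlignment-aligned and the alignment is a power of two:

    #include <cstdint>

    static uintptr_t ChunkBaseFromAddress(uintptr_t addr,
                                          uintptr_t alignment) {
      return addr & ~(alignment - 1);  // clear low bits -> chunk base
    }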
3066 | 3066 |
3067 #ifdef VERIFY_HEAP | 3067 #ifdef VERIFY_HEAP |
3068 // We do not assume that the large object iterator works, because it depends | 3068 // We do not assume that the large object iterator works, because it depends |
3069 // on the invariants we are checking during verification. | 3069 // on the invariants we are checking during verification. |
3070 void LargeObjectSpace::Verify() { | 3070 void LargeObjectSpace::Verify() { |
3071 for (LargePage* chunk = first_page_; | 3071 for (LargePage* chunk = first_page_; |
(...skipping 102 matching lines...)
3174 object->ShortPrint(); | 3174 object->ShortPrint(); |
3175 PrintF("\n"); | 3175 PrintF("\n"); |
3176 } | 3176 } |
3177 printf(" --------------------------------------\n"); | 3177 printf(" --------------------------------------\n"); |
3178 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3178 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
3179 } | 3179 } |
3180 | 3180 |
3181 #endif // DEBUG | 3181 #endif // DEBUG |
3182 | 3182 |
3183 } } // namespace v8::internal | 3183 } } // namespace v8::internal |