OLD | NEW |
---|---|
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/v8.h" | 5 #include "src/v8.h" |
6 | 6 |
7 #include "src/base/bits.h" | 7 #include "src/base/bits.h" |
8 #include "src/base/platform/platform.h" | 8 #include "src/base/platform/platform.h" |
9 #include "src/full-codegen.h" | 9 #include "src/full-codegen.h" |
10 #include "src/heap/mark-compact.h" | 10 #include "src/heap/mark-compact.h" |
(...skipping 1382 matching lines...) | |
1393 | 1393 |
1394 void NewSpace::ResetAllocationInfo() { | 1394 void NewSpace::ResetAllocationInfo() { |
1395 to_space_.Reset(); | 1395 to_space_.Reset(); |
1396 UpdateAllocationInfo(); | 1396 UpdateAllocationInfo(); |
1397 pages_used_ = 0; | 1397 pages_used_ = 0; |
1398 // Clear all mark-bits in the to-space. | 1398 // Clear all mark-bits in the to-space. |
1399 NewSpacePageIterator it(&to_space_); | 1399 NewSpacePageIterator it(&to_space_); |
1400 while (it.has_next()) { | 1400 while (it.has_next()) { |
1401 Bitmap::Clear(it.next()); | 1401 Bitmap::Clear(it.next()); |
1402 } | 1402 } |
1403 if (top_on_previous_step_) { | |
Hannes Payer (out of office)
2015/07/23 10:58:54
Resetting here makes the step still imprecise. We a
ofrobots
2015/07/23 16:31:34
Acknowledged.
| |
1404 // Start a new step. | |
1405 top_on_previous_step_ = allocation_info_.top(); | |
1406 } | |
1403 } | 1407 } |
1404 | 1408 |
1405 | 1409 |
1406 void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) { | 1410 void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) { |
1407 if (heap()->inline_allocation_disabled()) { | 1411 if (heap()->inline_allocation_disabled()) { |
1408 // Lowest limit when linear allocation was disabled. | 1412 // Lowest limit when linear allocation was disabled. |
1409 Address high = to_space_.page_high(); | 1413 Address high = to_space_.page_high(); |
1410 Address new_top = allocation_info_.top() + size_in_bytes; | 1414 Address new_top = allocation_info_.top() + size_in_bytes; |
1411 allocation_info_.set_limit(Min(new_top, high)); | 1415 allocation_info_.set_limit(Min(new_top, high)); |
1412 } else if (inline_allocation_limit_step() == 0) { | 1416 } else if (inline_allocation_limit_step() == 0) { |
(...skipping 51 matching lines...) | |
1464 return true; | 1468 return true; |
1465 } | 1469 } |
1466 | 1470 |
1467 | 1471 |
1468 AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes, | 1472 AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes, |
1469 AllocationAlignment alignment) { | 1473 AllocationAlignment alignment) { |
1470 Address old_top = allocation_info_.top(); | 1474 Address old_top = allocation_info_.top(); |
1471 Address high = to_space_.page_high(); | 1475 Address high = to_space_.page_high(); |
1472 if (allocation_info_.limit() < high) { | 1476 if (allocation_info_.limit() < high) { |
1473 int alignment_size = Heap::GetFillToAlign(old_top, alignment); | 1477 int alignment_size = Heap::GetFillToAlign(old_top, alignment); |
1474 int aligned_size_in_bytes = size_in_bytes + alignment_size; | 1478 int aligned_size_in_bytes = size_in_bytes + alignment_size; |
Hannes Payer (out of office)
2015/07/23 10:58:54
I think we do not need the complicated machinery t
ofrobots
2015/07/23 16:31:34
Okay, let me look into this.
ofrobots
2015/07/23 17:41:06
The reason this complicated machinery exists is to
Hannes Payer (out of office)
2015/07/24 06:32:10
Arg, yes. That is the reason why it is complicated
| |
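[Editor note] The "complicated machinery" debated in this thread is the delta accounting around top_on_previous_step_: the space remembers the allocation top at the previous incremental-marking step, and on the next slow-path allocation it credits only the bytes allocated since then before moving the marker forward. A minimal standalone sketch of that pattern follows; the types and names (NewSpaceSketch, IncrementalMarkingStub) are simplified stand-ins for illustration, not the real V8 classes.

    #include <cstdint>
    #include <cstdio>

    // Illustrative stand-in for incremental marking; not the real V8 API.
    struct IncrementalMarkingStub {
      void Step(int bytes_allocated) {
        std::printf("marking step credited with %d bytes\n", bytes_allocated);
      }
    };

    // Hypothetical sketch of the bookkeeping shown in the hunk above.
    struct NewSpaceSketch {
      std::uintptr_t top = 0x1000;                   // current allocation top
      std::uintptr_t top_on_previous_step = 0x1000;  // top when the last step ran
      IncrementalMarkingStub marking;

      void SlowAllocate(int size_in_bytes) {
        std::uintptr_t new_top = top + size_in_bytes;
        // Credit only the bytes allocated since the previous step, then
        // remember the new top so the next step is charged its own delta.
        int bytes_allocated = static_cast<int>(new_top - top_on_previous_step);
        marking.Step(bytes_allocated);
        top_on_previous_step = new_top;
        top = new_top;
      }
    };

    int main() {
      NewSpaceSketch space;
      space.SlowAllocate(64);   // step credited with 64 bytes
      space.SlowAllocate(128);  // step credited with 128 bytes
    }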
1475 | 1479 |
1476 // Either the limit has been lowered because linear allocation was disabled | 1480 // Either the limit has been lowered because linear allocation was disabled |
1477 // or because incremental marking wants to get a chance to do a step. Set | 1481 // or because incremental marking wants to get a chance to do a step. Set |
1478 // the new limit accordingly. | 1482 // the new limit accordingly. |
1479 Address new_top = old_top + aligned_size_in_bytes; | 1483 Address new_top = old_top + aligned_size_in_bytes; |
1480 int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_); | 1484 int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_); |
1485 | |
1481 heap()->incremental_marking()->Step(bytes_allocated, | 1486 heap()->incremental_marking()->Step(bytes_allocated, |
1482 IncrementalMarking::GC_VIA_STACK_GUARD); | 1487 IncrementalMarking::GC_VIA_STACK_GUARD); |
1483 UpdateInlineAllocationLimit(aligned_size_in_bytes); | 1488 UpdateInlineAllocationLimit(aligned_size_in_bytes); |
1489 | |
1490 AllocationResult result = | |
1491 (alignment == kWordAligned) | |
1492 ? AllocateRawUnaligned(size_in_bytes) | |
1493 : AllocateRawAligned(size_in_bytes, alignment); | |
1484 top_on_previous_step_ = new_top; | 1494 top_on_previous_step_ = new_top; |
1485 if (alignment == kWordAligned) return AllocateRawUnaligned(size_in_bytes); | 1495 return result; |
1486 return AllocateRawAligned(size_in_bytes, alignment); | |
1487 } else if (AddFreshPage()) { | 1496 } else if (AddFreshPage()) { |
1488 // Switched to new page. Try allocating again. | 1497 // Switched to new page. Try allocating again. |
1489 int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_); | |
Hannes Payer (out of office)
2015/07/23 10:58:54
This code accounted for bytes_allocated before we
ofrobots
2015/07/23 16:31:34
Okay, I understand why this case is here in the fi
| |
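[Editor note] The removed lines below are the case this thread discusses: when the current page is exhausted and a fresh page is added, the bytes allocated on the old page since the last step were credited to incremental marking, and the counter was rebased to the new page's low address. A standalone sketch of that rebasing under the same simplified, hypothetical names as above (PageSketch, FreshPageAccounting are not V8 types):

    #include <cstdint>
    #include <cstdio>

    // Simplified stand-in; low/high model the fresh page's bounds.
    struct PageSketch { std::uintptr_t low; std::uintptr_t high; };

    struct FreshPageAccounting {
      std::uintptr_t top_on_previous_step;

      // Called when allocation switches from the exhausted page at old_top to a
      // fresh page: credit the old-page remainder, then restart the delta from
      // the new page's low address so later steps only see new-page bytes.
      int OnFreshPage(std::uintptr_t old_top, const PageSketch& fresh_page) {
        int bytes_allocated = static_cast<int>(old_top - top_on_previous_step);
        top_on_previous_step = fresh_page.low;
        return bytes_allocated;  // would be passed to incremental_marking()->Step()
      }
    };

    int main() {
      FreshPageAccounting acct{0x2000};
      PageSketch fresh{0x4000, 0x8000};
      std::printf("credit %d bytes before switching pages\n",
                  acct.OnFreshPage(0x2100, fresh));  // prints 256
    }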
1490 heap()->incremental_marking()->Step(bytes_allocated, | |
1491 IncrementalMarking::GC_VIA_STACK_GUARD); | |
1492 top_on_previous_step_ = to_space_.page_low(); | |
1493 if (alignment == kWordAligned) return AllocateRawUnaligned(size_in_bytes); | 1498 if (alignment == kWordAligned) return AllocateRawUnaligned(size_in_bytes); |
1494 return AllocateRawAligned(size_in_bytes, alignment); | 1499 return AllocateRawAligned(size_in_bytes, alignment); |
1495 } else { | 1500 } else { |
1496 return AllocationResult::Retry(); | 1501 return AllocationResult::Retry(); |
1497 } | 1502 } |
1498 } | 1503 } |
1499 | 1504 |
1500 | 1505 |
1501 #ifdef VERIFY_HEAP | 1506 #ifdef VERIFY_HEAP |
1502 // We do not use the SemiSpaceIterator because verification doesn't assume | 1507 // We do not use the SemiSpaceIterator because verification doesn't assume |
(...skipping 1636 matching lines...) | |
3139 object->ShortPrint(); | 3144 object->ShortPrint(); |
3140 PrintF("\n"); | 3145 PrintF("\n"); |
3141 } | 3146 } |
3142 printf(" --------------------------------------\n"); | 3147 printf(" --------------------------------------\n"); |
3143 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3148 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
3144 } | 3149 } |
3145 | 3150 |
3146 #endif // DEBUG | 3151 #endif // DEBUG |
3147 } // namespace internal | 3152 } // namespace internal |
3148 } // namespace v8 | 3153 } // namespace v8 |